Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/texture_image_transport_surface.h" | 5 #include "content/common/gpu/texture_image_transport_surface.h" |
| 6 | 6 |
| 7 #include "content/common/gpu/gpu_channel.h" | 7 #include "content/common/gpu/gpu_channel.h" |
| 8 #include "content/common/gpu/gpu_channel_manager.h" | 8 #include "content/common/gpu/gpu_channel_manager.h" |
| 9 #include "content/common/gpu/gpu_messages.h" | 9 #include "content/common/gpu/gpu_messages.h" |
| 10 #include "gpu/command_buffer/service/context_group.h" | 10 #include "gpu/command_buffer/service/context_group.h" |
| (...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 130 if (stub_destroyed_) { | 130 if (stub_destroyed_) { |
| 131 // Early-exit so that we don't recreate the fbo. We still want to return | 131 // Early-exit so that we don't recreate the fbo. We still want to return |
| 132 // true, so that the context is made current and the GLES2DecoderImpl can | 132 // true, so that the context is made current and the GLES2DecoderImpl can |
| 133 // release its own resources. | 133 // release its own resources. |
| 134 return true; | 134 return true; |
| 135 } | 135 } |
| 136 | 136 |
| 137 if (!fbo_id_) { | 137 if (!fbo_id_) { |
| 138 glGenFramebuffersEXT(1, &fbo_id_); | 138 glGenFramebuffersEXT(1, &fbo_id_); |
| 139 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_); | 139 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_); |
| 140 CreateBackTexture(gfx::Size(1, 1)); | 140 CreateTexture(back(), gfx::Size(1, 1)); |
| 141 | 141 |
| 142 #ifndef NDEBUG | 142 #ifndef NDEBUG |
| 143 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); | 143 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); |
| 144 if (status != GL_FRAMEBUFFER_COMPLETE) { | 144 if (status != GL_FRAMEBUFFER_COMPLETE) { |
| 145 DLOG(ERROR) << "Framebuffer incomplete."; | 145 DLOG(ERROR) << "Framebuffer incomplete."; |
| 146 return false; | 146 return false; |
| 147 } | 147 } |
| 148 #endif | 148 #endif |
| 149 } | 149 } |
| 150 | 150 |
| 151 return true; | 151 return true; |
| 152 } | 152 } |
| 153 | 153 |
| 154 unsigned int TextureImageTransportSurface::GetBackingFrameBufferObject() { | 154 unsigned int TextureImageTransportSurface::GetBackingFrameBufferObject() { |
| 155 return fbo_id_; | 155 return fbo_id_; |
| 156 } | 156 } |
| 157 | 157 |
| 158 void TextureImageTransportSurface::SetBufferAllocation( | 158 void TextureImageTransportSurface::SetBackbufferAllocation(bool allocation) { |
| 159 BufferAllocationState state) { | 159 DCHECK(textures_[back()].info); |
| 160 if (static_cast<bool>(textures_[back()].info->service_id()) == allocation && | |
|
piman
2012/05/04 00:03:49
nit: use !! (or != 0) instead of static_cast<bool>
mmocny
2012/05/04 18:55:52
Thanks.
On 2012/05/04 00:03:49, piman wrote:
| |
| 161 textures_[back()].suggested_allocation == allocation) | |
| 162 return; | |
| 160 if (!helper_->MakeCurrent()) | 163 if (!helper_->MakeCurrent()) |
| 161 return; | 164 return; |
| 162 switch (state) { | 165 if (allocation) { |
| 163 case BUFFER_ALLOCATION_FRONT_AND_BACK: | 166 CreateTexture(back(), textures_[back()].size); |
| 164 CreateBackTexture(textures_[back()].size); | 167 } else { |
| 165 break; | 168 textures_[back()].suggested_allocation = false; |
| 166 case BUFFER_ALLOCATION_FRONT_ONLY: | 169 ReleaseTexture(back()); |
| 167 case BUFFER_ALLOCATION_NONE: | 170 } |
| 168 ReleaseBackTexture(); | 171 } |
| 169 break; | 172 |
| 170 }; | 173 void TextureImageTransportSurface::SetFrontbufferAllocation(bool allocation) { |
| 174 DCHECK(textures_[front()].info); | |
| 175 if (static_cast<bool>(textures_[front()].info->service_id()) == | |
|
piman
2012/05/04 00:03:49
nit: use !! (or != 0) instead of static_cast<bool>
| |
| 176 allocation && textures_[front()].suggested_allocation == allocation) | |
| 177 return; | |
| 178 if (!helper_->MakeCurrent()) | |
| 179 return; | |
|
piman
2012/05/04 00:03:49
should we set textures_[front()].suggested_allocat
mmocny
2012/05/04 18:55:52
No -- hopefully my previous explanation of what th
| |
| 180 // Note: we don't actually allocate frontbuffer now, we just fiddle the | |
| 181 // right state so that it is reallocated during the next swap. | |
| 182 if (allocation) | |
| 183 textures_[front()].size = gfx::Size(); | |
| 184 else | |
| 185 RequestReleaseTexture(front()); | |
| 171 } | 186 } |
| 172 | 187 |
| 173 void* TextureImageTransportSurface::GetShareHandle() { | 188 void* TextureImageTransportSurface::GetShareHandle() { |
| 174 return GetHandle(); | 189 return GetHandle(); |
| 175 } | 190 } |
| 176 | 191 |
| 177 void* TextureImageTransportSurface::GetDisplay() { | 192 void* TextureImageTransportSurface::GetDisplay() { |
| 178 return parent_stub_ ? parent_stub_->surface()->GetDisplay() : NULL; | 193 return parent_stub_ ? parent_stub_->surface()->GetDisplay() : NULL; |
| 179 } | 194 } |
| 180 | 195 |
| 181 void* TextureImageTransportSurface::GetConfig() { | 196 void* TextureImageTransportSurface::GetConfig() { |
| 182 return parent_stub_ ? parent_stub_->surface()->GetConfig() : NULL; | 197 return parent_stub_ ? parent_stub_->surface()->GetConfig() : NULL; |
| 183 } | 198 } |
| 184 | 199 |
| 185 void TextureImageTransportSurface::OnResize(gfx::Size size) { | 200 void TextureImageTransportSurface::OnResize(gfx::Size size) { |
| 186 CreateBackTexture(size); | 201 CreateTexture(back(), size); |
| 187 } | 202 } |
| 188 | 203 |
| 189 void TextureImageTransportSurface::OnWillDestroyStub( | 204 void TextureImageTransportSurface::OnWillDestroyStub( |
| 190 GpuCommandBufferStub* stub) { | 205 GpuCommandBufferStub* stub) { |
| 191 if (stub == parent_stub_) { | 206 if (stub == parent_stub_) { |
| 192 ReleaseParentStub(); | 207 ReleaseParentStub(); |
| 193 } else { | 208 } else { |
| 194 stub->RemoveDestructionObserver(this); | 209 stub->RemoveDestructionObserver(this); |
| 195 // We are losing the stub owning us, this is our last chance to clean up the | 210 // We are losing the stub owning us, this is our last chance to clean up the |
| 196 // resources we allocated in the stub's context. | 211 // resources we allocated in the stub's context. |
| 197 glDeleteFramebuffersEXT(1, &fbo_id_); | 212 glDeleteFramebuffersEXT(1, &fbo_id_); |
| 198 CHECK_GL_ERROR(); | 213 CHECK_GL_ERROR(); |
| 199 fbo_id_ = 0; | 214 fbo_id_ = 0; |
| 200 | 215 |
| 201 stub_destroyed_ = true; | 216 stub_destroyed_ = true; |
| 202 } | 217 } |
| 203 } | 218 } |
| 204 | 219 |
| 205 bool TextureImageTransportSurface::SwapBuffers() { | 220 bool TextureImageTransportSurface::SwapBuffers() { |
| 221 DCHECK(textures_[back()].info); | |
| 222 DCHECK(textures_[back()].info->service_id()); | |
| 206 if (!parent_stub_) { | 223 if (!parent_stub_) { |
| 207 LOG(ERROR) << "SwapBuffers failed because no parent stub."; | 224 LOG(ERROR) << "SwapBuffers failed because no parent stub."; |
| 208 return false; | 225 return false; |
| 209 } | 226 } |
| 227 DCHECK(textures_[front()].info); | |
| 228 if (!textures_[front()].info->service_id() && | |
| 229 textures_[front()].size != gfx::Size()) | |
| 230 return true; | |
| 210 | 231 |
| 211 glFlush(); | 232 glFlush(); |
| 212 front_ = back(); | 233 front_ = back(); |
| 213 previous_damage_rect_ = gfx::Rect(textures_[front_].size); | 234 previous_damage_rect_ = gfx::Rect(textures_[front()].size); |
| 214 | 235 |
| 215 DCHECK(textures_[front_].client_id != 0); | 236 DCHECK(textures_[front()].client_id != 0); |
| 216 | 237 |
| 217 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params; | 238 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params; |
| 218 params.surface_handle = textures_[front_].client_id; | 239 params.surface_handle = textures_[front()].client_id; |
| 219 helper_->SendAcceleratedSurfaceBuffersSwapped(params); | 240 helper_->SendAcceleratedSurfaceBuffersSwapped(params); |
| 220 helper_->SetScheduled(false); | 241 helper_->SetScheduled(false); |
| 221 return true; | 242 return true; |
| 222 } | 243 } |
| 223 | 244 |
| 224 bool TextureImageTransportSurface::PostSubBuffer( | 245 bool TextureImageTransportSurface::PostSubBuffer( |
| 225 int x, int y, int width, int height) { | 246 int x, int y, int width, int height) { |
| 247 DCHECK(textures_[back()].info); | |
| 248 DCHECK(textures_[back()].info->service_id()); | |
| 226 if (!parent_stub_) { | 249 if (!parent_stub_) { |
| 227 LOG(ERROR) << "PostSubBuffer failed because no parent stub."; | 250 LOG(ERROR) << "PostSubBuffer failed because no parent stub."; |
| 228 return false; | 251 return false; |
| 229 } | 252 } |
| 230 | 253 DCHECK(textures_[front()].info); |
| 231 DCHECK(textures_[back()].info); | 254 if (!textures_[front()].info->service_id() && |
| 232 int back_texture_service_id = textures_[back()].info->service_id(); | 255 textures_[front()].size != gfx::Size()) |
| 233 | 256 return true; |
| 234 DCHECK(textures_[front_].info); | |
| 235 int front_texture_service_id = textures_[front_].info->service_id(); | |
| 236 | |
| 237 gfx::Size expected_size = textures_[back()].size; | |
| 238 bool surfaces_same_size = textures_[front_].size == expected_size; | |
| 239 | 257 |
| 240 const gfx::Rect new_damage_rect(x, y, width, height); | 258 const gfx::Rect new_damage_rect(x, y, width, height); |
| 241 | 259 |
| 242 // An empty damage rect is a successful no-op. | 260 // An empty damage rect is a successful no-op. |
| 243 if (new_damage_rect.IsEmpty()) | 261 if (new_damage_rect.IsEmpty()) |
| 244 return true; | 262 return true; |
| 245 | 263 |
| 264 int back_texture_service_id = textures_[back()].info->service_id(); | |
| 265 int front_texture_service_id = textures_[front()].info->service_id(); | |
| 266 | |
| 267 gfx::Size expected_size = textures_[back()].size; | |
| 268 bool surfaces_same_size = textures_[front()].size == expected_size; | |
| 269 | |
| 246 if (surfaces_same_size) { | 270 if (surfaces_same_size) { |
| 247 std::vector<gfx::Rect> regions_to_copy; | 271 std::vector<gfx::Rect> regions_to_copy; |
| 248 GetRegionsToCopy(previous_damage_rect_, new_damage_rect, ®ions_to_copy); | 272 GetRegionsToCopy(previous_damage_rect_, new_damage_rect, ®ions_to_copy); |
| 249 | 273 |
| 250 ScopedFrameBufferBinder fbo_binder(fbo_id_); | 274 ScopedFrameBufferBinder fbo_binder(fbo_id_); |
| 251 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, | 275 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, |
| 252 GL_COLOR_ATTACHMENT0, | 276 GL_COLOR_ATTACHMENT0, |
| 253 GL_TEXTURE_2D, | 277 GL_TEXTURE_2D, |
| 254 front_texture_service_id, | 278 front_texture_service_id, |
| 255 0); | 279 0); |
| 256 ScopedTextureBinder texture_binder(back_texture_service_id); | 280 ScopedTextureBinder texture_binder(back_texture_service_id); |
| 257 | 281 |
| 258 for (size_t i = 0; i < regions_to_copy.size(); ++i) { | 282 for (size_t i = 0; i < regions_to_copy.size(); ++i) { |
| 259 const gfx::Rect& region_to_copy = regions_to_copy[i]; | 283 const gfx::Rect& region_to_copy = regions_to_copy[i]; |
| 260 if (!region_to_copy.IsEmpty()) { | 284 if (!region_to_copy.IsEmpty()) { |
| 261 glCopyTexSubImage2D(GL_TEXTURE_2D, 0, region_to_copy.x(), | 285 glCopyTexSubImage2D(GL_TEXTURE_2D, 0, region_to_copy.x(), |
| 262 region_to_copy.y(), region_to_copy.x(), region_to_copy.y(), | 286 region_to_copy.y(), region_to_copy.x(), region_to_copy.y(), |
| 263 region_to_copy.width(), region_to_copy.height()); | 287 region_to_copy.width(), region_to_copy.height()); |
| 264 } | 288 } |
| 265 } | 289 } |
| 266 } else { | 290 } else { |
| 267 DCHECK(new_damage_rect == gfx::Rect(expected_size)); | 291 DCHECK(new_damage_rect == gfx::Rect(expected_size)); |
| 268 } | 292 } |
| 269 | 293 |
| 270 glFlush(); | 294 glFlush(); |
| 271 front_ = back(); | 295 front_ = back(); |
| 296 previous_damage_rect_ = new_damage_rect; | |
| 297 | |
| 298 DCHECK(textures_[front()].client_id != 0); | |
| 272 | 299 |
| 273 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params; | 300 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params; |
| 274 params.surface_handle = textures_[front_].client_id; | 301 params.surface_handle = textures_[front()].client_id; |
| 275 params.x = x; | 302 params.x = x; |
| 276 params.y = y; | 303 params.y = y; |
| 277 params.width = width; | 304 params.width = width; |
| 278 params.height = height; | 305 params.height = height; |
| 279 helper_->SendAcceleratedSurfacePostSubBuffer(params); | 306 helper_->SendAcceleratedSurfacePostSubBuffer(params); |
| 280 helper_->SetScheduled(false); | 307 helper_->SetScheduled(false); |
| 281 | |
| 282 previous_damage_rect_ = new_damage_rect; | |
| 283 return true; | 308 return true; |
| 284 } | 309 } |
| 285 | 310 |
| 286 std::string TextureImageTransportSurface::GetExtensions() { | 311 std::string TextureImageTransportSurface::GetExtensions() { |
| 287 std::string extensions = gfx::GLSurface::GetExtensions(); | 312 std::string extensions = gfx::GLSurface::GetExtensions(); |
| 288 extensions += extensions.empty() ? "" : " "; | 313 extensions += extensions.empty() ? "" : " "; |
| 289 extensions += "GL_CHROMIUM_front_buffer_cached "; | 314 extensions += "GL_CHROMIUM_front_buffer_cached "; |
| 290 extensions += "GL_CHROMIUM_post_sub_buffer"; | 315 extensions += "GL_CHROMIUM_post_sub_buffer"; |
| 291 return extensions; | 316 return extensions; |
| 292 } | 317 } |
| 293 | 318 |
| 294 gfx::Size TextureImageTransportSurface::GetSize() { | 319 gfx::Size TextureImageTransportSurface::GetSize() { |
| 295 return textures_[back()].size; | 320 return textures_[back()].size; |
| 296 } | 321 } |
| 297 | 322 |
| 298 void* TextureImageTransportSurface::GetHandle() { | 323 void* TextureImageTransportSurface::GetHandle() { |
| 299 return parent_stub_ ? parent_stub_->surface()->GetHandle() : NULL; | 324 return parent_stub_ ? parent_stub_->surface()->GetHandle() : NULL; |
| 300 } | 325 } |
| 301 | 326 |
| 302 | 327 |
| 303 void TextureImageTransportSurface::OnNewSurfaceACK( | 328 void TextureImageTransportSurface::OnNewSurfaceACK( |
| 304 uint64 surface_handle, TransportDIB::Handle /*shm_handle*/) { | 329 uint64 surface_handle, TransportDIB::Handle /*shm_handle*/) { |
| 305 } | 330 } |
| 306 | 331 |
| 332 void TextureImageTransportSurface::OnDiscardSurface( | |
| 333 uint64 surface_id) { | |
|
piman
2012/05/04 00:03:49
Why do we need the id here? This is called by the
mmocny
2012/05/04 18:55:52
Originally, this was here to protect from releasin
| |
| 334 ReleaseTexture(textures_[front()].client_id == surface_id ? | |
| 335 front() : back()); | |
| 336 } | |
| 337 | |
| 307 void TextureImageTransportSurface::OnBuffersSwappedACK() { | 338 void TextureImageTransportSurface::OnBuffersSwappedACK() { |
| 308 if (helper_->MakeCurrent()) { | 339 if (helper_->MakeCurrent()) { |
| 309 if (textures_[front_].size != textures_[back()].size) { | 340 if (textures_[front()].size != textures_[back()].size) { |
| 310 CreateBackTexture(textures_[front_].size); | 341 CreateTexture(back(), textures_[front()].size); |
| 311 } else { | 342 } else { |
| 312 AttachBackTextureToFBO(); | 343 AttachTextureToFBO(back()); |
| 313 } | 344 } |
| 314 } | 345 } |
| 315 | 346 |
| 316 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context | 347 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context |
| 317 // logic. | 348 // logic. |
| 318 helper_->SetScheduled(true); | 349 helper_->SetScheduled(true); |
| 319 } | 350 } |
| 320 | 351 |
| 321 void TextureImageTransportSurface::OnPostSubBufferACK() { | 352 void TextureImageTransportSurface::OnPostSubBufferACK() { |
| 322 OnBuffersSwappedACK(); | 353 OnBuffersSwappedACK(); |
| 323 } | 354 } |
| 324 | 355 |
| 325 void TextureImageTransportSurface::OnResizeViewACK() { | 356 void TextureImageTransportSurface::OnResizeViewACK() { |
| 326 NOTREACHED(); | 357 NOTREACHED(); |
| 327 } | 358 } |
| 328 | 359 |
| 329 void TextureImageTransportSurface::ReleaseBackTexture() { | 360 void TextureImageTransportSurface::RequestReleaseTexture(int id) { |
| 361 Texture& texture = textures_[id]; | |
| 362 texture.suggested_allocation = false; | |
| 363 if (!texture.sent_to_client) { | |
| 364 ReleaseTexture(id); | |
| 365 return; | |
| 366 } | |
| 367 GpuHostMsg_AcceleratedSurfaceSuggestDiscard_Params params; | |
| 368 params.identifier = texture.client_id; | |
| 369 helper_->SendAcceleratedSurfaceSuggestDiscard(params); | |
| 370 } | |
| 371 | |
| 372 void TextureImageTransportSurface::ReleaseTexture(int id) { | |
| 330 if (!parent_stub_) | 373 if (!parent_stub_) |
| 331 return; | 374 return; |
| 332 TextureInfo* info = textures_[back()].info; | 375 Texture& texture = textures_[id]; |
| 376 TextureInfo* info = texture.info; | |
| 333 DCHECK(info); | 377 DCHECK(info); |
| 334 | 378 |
| 379 texture.sent_to_client = false; | |
| 380 if (texture.suggested_allocation) | |
| 381 return; | |
| 335 GLuint service_id = info->service_id(); | 382 GLuint service_id = info->service_id(); |
| 336 if (!service_id) | 383 if (!service_id) |
| 337 return; | 384 return; |
| 338 info->SetServiceId(0); | 385 info->SetServiceId(0); |
| 339 | 386 |
| 387 | |
| 340 { | 388 { |
| 341 ScopedFrameBufferBinder fbo_binder(fbo_id_); | 389 ScopedFrameBufferBinder fbo_binder(fbo_id_); |
| 342 glDeleteTextures(1, &service_id); | 390 glDeleteTextures(1, &service_id); |
| 343 } | 391 } |
| 344 glFlush(); | 392 glFlush(); |
| 345 CHECK_GL_ERROR(); | 393 CHECK_GL_ERROR(); |
| 394 | |
| 395 if (id == front()) | |
| 396 previous_damage_rect_ = gfx::Rect(); | |
| 346 } | 397 } |
| 347 | 398 |
| 348 void TextureImageTransportSurface::CreateBackTexture(const gfx::Size& size) { | 399 void TextureImageTransportSurface::CreateTexture( |
| 400 int id, const gfx::Size& size) { | |
|
piman
2012/05/04 00:03:49
AFAICT you only call CreateTexture(back(), ...), w
mmocny
2012/05/04 18:55:52
done.
On 2012/05/04 00:03:49, piman wrote:
| |
| 349 if (!parent_stub_) | 401 if (!parent_stub_) |
| 350 return; | 402 return; |
| 351 Texture& texture = textures_[back()]; | 403 Texture& texture = textures_[id]; |
| 352 TextureInfo* info = texture.info; | 404 TextureInfo* info = texture.info; |
| 353 DCHECK(info); | 405 DCHECK(info); |
| 354 | 406 |
| 355 GLuint service_id = info->service_id(); | 407 GLuint service_id = info->service_id(); |
| 356 | 408 |
| 357 if (service_id && texture.size == size) | 409 // If the suggested allocation is true, and we are already allocated and the |
| 410 // right size, we can early exit. | |
| 411 if (service_id && | |
| 412 texture.size == size && | |
| 413 texture.suggested_allocation) { | |
| 414 DCHECK(texture.sent_to_client); | |
| 358 return; | 415 return; |
| 416 } | |
| 417 // If the suggested allocation is false, that means we've sent a suggestion to | |
| 418 // release surface, and may possibly soon get an ack to actually release, yet | |
| 419 // we now have a request to create, and would thus like to ignore that | |
| 420 // release. We do this by resettign suggested_allocation and sending an | |
|
piman
2012/05/04 00:03:49
typo: resettign->resetting
mmocny
2012/05/04 18:55:52
done.
On 2012/05/04 00:03:49, piman wrote:
| |
| 421 // AcceleratedSurfaceNew to give the ui access to the surface again, | |
| 422 // though me may not need to generate textures or resize. | |
| 423 texture.suggested_allocation = true; | |
| 359 | 424 |
| 360 if (!service_id) { | 425 if (!service_id) { |
| 361 glGenTextures(1, &service_id); | 426 glGenTextures(1, &service_id); |
| 362 info->SetServiceId(service_id); | 427 info->SetServiceId(service_id); |
| 363 } | 428 } |
| 364 | 429 |
| 365 if (size != texture.size) { | 430 if (size != texture.size) { |
| 366 texture.size = size; | 431 texture.size = size; |
| 367 TextureManager* texture_manager = | 432 TextureManager* texture_manager = |
| 368 parent_stub_->decoder()->GetContextGroup()->texture_manager(); | 433 parent_stub_->decoder()->GetContextGroup()->texture_manager(); |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 385 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); | 450 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); |
| 386 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); | 451 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); |
| 387 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 452 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 388 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 453 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 389 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, | 454 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, |
| 390 size.width(), size.height(), 0, | 455 size.width(), size.height(), 0, |
| 391 GL_RGBA, GL_UNSIGNED_BYTE, NULL); | 456 GL_RGBA, GL_UNSIGNED_BYTE, NULL); |
| 392 CHECK_GL_ERROR(); | 457 CHECK_GL_ERROR(); |
| 393 } | 458 } |
| 394 | 459 |
| 395 AttachBackTextureToFBO(); | 460 AttachTextureToFBO(id); |
| 396 | 461 |
| 397 GpuHostMsg_AcceleratedSurfaceNew_Params params; | 462 GpuHostMsg_AcceleratedSurfaceNew_Params params; |
| 398 params.width = size.width(); | 463 params.width = size.width(); |
| 399 params.height = size.height(); | 464 params.height = size.height(); |
| 400 params.surface_handle = texture.client_id; | 465 params.surface_handle = texture.client_id; |
| 401 helper_->SendAcceleratedSurfaceNew(params); | 466 helper_->SendAcceleratedSurfaceNew(params); |
| 402 texture.sent_to_client = true; | 467 texture.sent_to_client = true; |
| 403 } | 468 } |
| 404 | 469 |
| 405 void TextureImageTransportSurface::AttachBackTextureToFBO() { | 470 void TextureImageTransportSurface::AttachTextureToFBO(int id) { |
|
piman
2012/05/04 00:03:49
Having an id here smells fishy to me. Why would we
mmocny
2012/05/04 18:55:52
Yes, this was just a result of CreateTexture takin
| |
| 406 if (!parent_stub_) | 471 if (!parent_stub_) |
| 407 return; | 472 return; |
| 408 DCHECK(textures_[back()].info); | 473 TextureInfo* info = textures_[id].info; |
| 474 DCHECK(textures_[id].info); | |
| 409 | 475 |
| 410 ScopedFrameBufferBinder fbo_binder(fbo_id_); | 476 ScopedFrameBufferBinder fbo_binder(fbo_id_); |
| 411 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, | 477 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, |
| 412 GL_COLOR_ATTACHMENT0, | 478 GL_COLOR_ATTACHMENT0, |
| 413 GL_TEXTURE_2D, | 479 GL_TEXTURE_2D, |
| 414 textures_[back()].info->service_id(), | 480 info->service_id(), |
| 415 0); | 481 0); |
| 416 glFlush(); | 482 glFlush(); |
| 417 CHECK_GL_ERROR(); | 483 CHECK_GL_ERROR(); |
| 418 | 484 |
| 419 #ifndef NDEBUG | 485 #ifndef NDEBUG |
| 420 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); | 486 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); |
| 421 if (status != GL_FRAMEBUFFER_COMPLETE) { | 487 if (status != GL_FRAMEBUFFER_COMPLETE) { |
| 422 DLOG(ERROR) << "Framebuffer incomplete."; | 488 DLOG(ERROR) << "Framebuffer incomplete."; |
| 423 } | 489 } |
| 424 #endif | 490 #endif |
| 425 } | 491 } |
| 426 | 492 |
| 427 void TextureImageTransportSurface::ReleaseParentStub() { | 493 void TextureImageTransportSurface::ReleaseParentStub() { |
| 428 DCHECK(parent_stub_); | 494 DCHECK(parent_stub_); |
| 429 parent_stub_->RemoveDestructionObserver(this); | 495 parent_stub_->RemoveDestructionObserver(this); |
| 430 for (int i = 0; i < 2; ++i) { | 496 for (int i = 0; i < 2; ++i) { |
| 431 Texture& texture = textures_[i]; | 497 Texture& texture = textures_[i]; |
| 432 texture.info = NULL; | 498 texture.info = NULL; |
| 433 if (!texture.sent_to_client) | 499 if (!texture.sent_to_client) |
| 434 continue; | 500 continue; |
| 435 GpuHostMsg_AcceleratedSurfaceRelease_Params params; | 501 GpuHostMsg_AcceleratedSurfaceRelease_Params params; |
| 436 params.identifier = texture.client_id; | 502 params.identifier = texture.client_id; |
| 437 helper_->SendAcceleratedSurfaceRelease(params); | 503 helper_->SendAcceleratedSurfaceRelease(params); |
| 438 } | 504 } |
| 439 parent_stub_ = NULL; | 505 parent_stub_ = NULL; |
| 440 } | 506 } |
| OLD | NEW |