Chromium Code Reviews

Side by Side Diff: gpu/ipc/service/gpu_command_buffer_stub.cc

Issue 1845563005: Refactor content/common/gpu into gpu/ipc/service (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Drop ref to deleted content_tests_gypi_values.content_unittests_ozone_sources (created 4 years, 8 months ago)
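
Annotation (not part of the patch): this CL moves the file from content/common/gpu/ to gpu/ipc/service/, and the changes in the diff below follow one mechanical pattern: include paths under content/common/gpu/ are rewritten to gpu/ipc/service/, the enclosing namespace changes from content to gpu, and explicit gpu:: qualifiers become redundant inside the new namespace and are dropped. A minimal before/after sketch of that pattern, abridged from this diff (surrounding includes elided):

    // Before (content/common/gpu/gpu_command_buffer_stub.cc), abridged:
    #include "content/common/gpu/gpu_channel.h"

    namespace content {

    void GpuCommandBufferStub::MarkContextLost() {
      command_buffer_->SetContextLostReason(gpu::error::kUnknown);
    }

    }  // namespace content

    // After (gpu/ipc/service/gpu_command_buffer_stub.cc), abridged:
    #include "gpu/ipc/service/gpu_channel.h"

    namespace gpu {

    void GpuCommandBufferStub::MarkContextLost() {
      command_buffer_->SetContextLostReason(error::kUnknown);
    }

    }  // namespace gpu
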
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/gpu_command_buffer_stub.h" 5 #include "gpu/ipc/service/gpu_command_buffer_stub.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/bind_helpers.h" 10 #include "base/bind_helpers.h"
11 #include "base/hash.h" 11 #include "base/hash.h"
12 #include "base/json/json_writer.h" 12 #include "base/json/json_writer.h"
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/memory/shared_memory.h" 14 #include "base/memory/shared_memory.h"
15 #include "base/time/time.h" 15 #include "base/time/time.h"
16 #include "base/trace_event/trace_event.h" 16 #include "base/trace_event/trace_event.h"
17 #include "build/build_config.h" 17 #include "build/build_config.h"
18 #include "content/common/gpu/gpu_channel.h"
19 #include "content/common/gpu/gpu_channel_manager.h"
20 #include "content/common/gpu/gpu_channel_manager_delegate.h"
21 #include "content/common/gpu/gpu_memory_manager.h"
22 #include "content/common/gpu/gpu_memory_tracking.h"
23 #include "content/common/gpu/gpu_watchdog.h"
24 #include "content/common/gpu/image_transport_surface.h"
25 #include "gpu/command_buffer/common/constants.h" 18 #include "gpu/command_buffer/common/constants.h"
26 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" 19 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
27 #include "gpu/command_buffer/common/mailbox.h" 20 #include "gpu/command_buffer/common/mailbox.h"
28 #include "gpu/command_buffer/common/sync_token.h" 21 #include "gpu/command_buffer/common/sync_token.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h" 22 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gl_state_restorer_impl.h" 23 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
31 #include "gpu/command_buffer/service/image_manager.h" 24 #include "gpu/command_buffer/service/image_manager.h"
32 #include "gpu/command_buffer/service/logger.h" 25 #include "gpu/command_buffer/service/logger.h"
33 #include "gpu/command_buffer/service/mailbox_manager.h" 26 #include "gpu/command_buffer/service/mailbox_manager.h"
34 #include "gpu/command_buffer/service/memory_tracking.h" 27 #include "gpu/command_buffer/service/memory_tracking.h"
35 #include "gpu/command_buffer/service/query_manager.h" 28 #include "gpu/command_buffer/service/query_manager.h"
36 #include "gpu/command_buffer/service/sync_point_manager.h" 29 #include "gpu/command_buffer/service/sync_point_manager.h"
37 #include "gpu/command_buffer/service/transfer_buffer_manager.h" 30 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
38 #include "gpu/command_buffer/service/valuebuffer_manager.h" 31 #include "gpu/command_buffer/service/valuebuffer_manager.h"
39 #include "gpu/ipc/common/gpu_messages.h" 32 #include "gpu/ipc/common/gpu_messages.h"
33 #include "gpu/ipc/service/gpu_channel.h"
34 #include "gpu/ipc/service/gpu_channel_manager.h"
35 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
36 #include "gpu/ipc/service/gpu_memory_manager.h"
37 #include "gpu/ipc/service/gpu_memory_tracking.h"
38 #include "gpu/ipc/service/gpu_watchdog.h"
39 #include "gpu/ipc/service/image_transport_surface.h"
40 #include "ui/gl/gl_bindings.h" 40 #include "ui/gl/gl_bindings.h"
41 #include "ui/gl/gl_image.h" 41 #include "ui/gl/gl_image.h"
42 #include "ui/gl/gl_switches.h" 42 #include "ui/gl/gl_switches.h"
43 43
44 #if defined(OS_WIN) 44 #if defined(OS_WIN)
45 #include "base/win/win_util.h" 45 #include "base/win/win_util.h"
46 #endif 46 #endif
47 47
48 #if defined(OS_ANDROID) 48 #if defined(OS_ANDROID)
49 #include "content/common/gpu/stream_texture_android.h" 49 #include "gpu/ipc/service/stream_texture_android.h"
50 #endif 50 #endif
51 51
52 namespace content { 52 namespace gpu {
53 struct WaitForCommandState { 53 struct WaitForCommandState {
54 WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply) 54 WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply)
55 : start(start), end(end), reply(reply) {} 55 : start(start), end(end), reply(reply) {}
56 56
57 int32_t start; 57 int32_t start;
58 int32_t end; 58 int32_t end;
59 scoped_ptr<IPC::Message> reply; 59 scoped_ptr<IPC::Message> reply;
60 }; 60 };
61 61
62 namespace { 62 namespace {
63 63
64 // The GpuCommandBufferMemoryTracker class provides a bridge between the 64 // The GpuCommandBufferMemoryTracker class provides a bridge between the
65 // ContextGroup's memory type managers and the GpuMemoryManager class. 65 // ContextGroup's memory type managers and the GpuMemoryManager class.
66 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker { 66 class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
67 public: 67 public:
68 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel, 68 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel,
69 uint64_t share_group_tracing_guid) 69 uint64_t share_group_tracing_guid)
70 : tracking_group_( 70 : tracking_group_(
71 channel->gpu_channel_manager() 71 channel->gpu_channel_manager()
72 ->gpu_memory_manager() 72 ->gpu_memory_manager()
73 ->CreateTrackingGroup(channel->GetClientPID(), this)), 73 ->CreateTrackingGroup(channel->GetClientPID(), this)),
74 client_tracing_id_(channel->client_tracing_id()), 74 client_tracing_id_(channel->client_tracing_id()),
75 client_id_(channel->client_id()), 75 client_id_(channel->client_id()),
76 share_group_tracing_guid_(share_group_tracing_guid) {} 76 share_group_tracing_guid_(share_group_tracing_guid) {}
(...skipping 70 matching lines...)
147 }; 147 };
148 148
149 scoped_ptr<base::trace_event::ConvertableToTraceFormat> 149 scoped_ptr<base::trace_event::ConvertableToTraceFormat>
150 DevToolsChannelData::CreateForChannel(GpuChannel* channel) { 150 DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
151 scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue); 151 scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
152 res->SetInteger("renderer_pid", channel->GetClientPID()); 152 res->SetInteger("renderer_pid", channel->GetClientPID());
153 res->SetDouble("used_bytes", channel->GetMemoryUsage()); 153 res->SetDouble("used_bytes", channel->GetMemoryUsage());
154 return make_scoped_ptr(new DevToolsChannelData(res.release())); 154 return make_scoped_ptr(new DevToolsChannelData(res.release()));
155 } 155 }
156 156
157 gpu::CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) { 157 CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) {
158 return gpu::CommandBufferId::FromUnsafeValue( 158 return CommandBufferId::FromUnsafeValue(
159 (static_cast<uint64_t>(channel_id) << 32) | route_id); 159 (static_cast<uint64_t>(channel_id) << 32) | route_id);
160 } 160 }
161 161
162 } // namespace 162 } // namespace
163 163
164 GpuCommandBufferStub::GpuCommandBufferStub( 164 GpuCommandBufferStub::GpuCommandBufferStub(
165 GpuChannel* channel, 165 GpuChannel* channel,
166 gpu::SyncPointManager* sync_point_manager, 166 SyncPointManager* sync_point_manager,
167 base::SingleThreadTaskRunner* task_runner, 167 base::SingleThreadTaskRunner* task_runner,
168 GpuCommandBufferStub* share_group, 168 GpuCommandBufferStub* share_group,
169 gpu::SurfaceHandle surface_handle, 169 SurfaceHandle surface_handle,
170 gpu::gles2::MailboxManager* mailbox_manager, 170 gles2::MailboxManager* mailbox_manager,
171 gpu::PreemptionFlag* preempt_by_flag, 171 PreemptionFlag* preempt_by_flag,
172 gpu::gles2::SubscriptionRefSet* subscription_ref_set, 172 gles2::SubscriptionRefSet* subscription_ref_set,
173 gpu::ValueStateMap* pending_valuebuffer_state, 173 ValueStateMap* pending_valuebuffer_state,
174 const gfx::Size& size, 174 const gfx::Size& size,
175 const gpu::gles2::DisallowedFeatures& disallowed_features, 175 const gles2::DisallowedFeatures& disallowed_features,
176 const std::vector<int32_t>& attribs, 176 const std::vector<int32_t>& attribs,
177 gfx::GpuPreference gpu_preference, 177 gfx::GpuPreference gpu_preference,
178 int32_t stream_id, 178 int32_t stream_id,
179 int32_t route_id, 179 int32_t route_id,
180 GpuWatchdog* watchdog, 180 GpuWatchdog* watchdog,
181 const GURL& active_url) 181 const GURL& active_url)
182 : channel_(channel), 182 : channel_(channel),
183 sync_point_manager_(sync_point_manager), 183 sync_point_manager_(sync_point_manager),
184 task_runner_(task_runner), 184 task_runner_(task_runner),
185 initialized_(false), 185 initialized_(false),
186 surface_handle_(surface_handle), 186 surface_handle_(surface_handle),
187 initial_size_(size), 187 initial_size_(size),
188 disallowed_features_(disallowed_features), 188 disallowed_features_(disallowed_features),
189 requested_attribs_(attribs), 189 requested_attribs_(attribs),
190 gpu_preference_(gpu_preference), 190 gpu_preference_(gpu_preference),
191 use_virtualized_gl_context_(false), 191 use_virtualized_gl_context_(false),
192 command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)), 192 command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)),
193 stream_id_(stream_id), 193 stream_id_(stream_id),
194 route_id_(route_id), 194 route_id_(route_id),
195 last_flush_count_(0), 195 last_flush_count_(0),
196 surface_format_(gfx::GLSurface::SURFACE_DEFAULT), 196 surface_format_(gfx::GLSurface::SURFACE_DEFAULT),
197 watchdog_(watchdog), 197 watchdog_(watchdog),
198 waiting_for_sync_point_(false), 198 waiting_for_sync_point_(false),
199 previous_processed_num_(0), 199 previous_processed_num_(0),
200 preemption_flag_(preempt_by_flag), 200 preemption_flag_(preempt_by_flag),
201 active_url_(active_url) { 201 active_url_(active_url) {
202 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); 202 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
203 FastSetActiveURL(active_url_, active_url_hash_, channel_); 203 FastSetActiveURL(active_url_, active_url_hash_, channel_);
204 204
205 gpu::gles2::ContextCreationAttribHelper attrib_parser; 205 gles2::ContextCreationAttribHelper attrib_parser;
206 attrib_parser.Parse(requested_attribs_); 206 attrib_parser.Parse(requested_attribs_);
207 207
208 if (share_group) { 208 if (share_group) {
209 context_group_ = share_group->context_group_; 209 context_group_ = share_group->context_group_;
210 DCHECK(context_group_->bind_generates_resource() == 210 DCHECK(context_group_->bind_generates_resource() ==
211 attrib_parser.bind_generates_resource); 211 attrib_parser.bind_generates_resource);
212 } else { 212 } else {
213 context_group_ = new gpu::gles2::ContextGroup( 213 context_group_ = new gles2::ContextGroup(
214 channel_->gpu_channel_manager()->gpu_preferences(), mailbox_manager, 214 channel_->gpu_channel_manager()->gpu_preferences(), mailbox_manager,
215 new GpuCommandBufferMemoryTracker(channel, 215 new GpuCommandBufferMemoryTracker(channel,
216 command_buffer_id_.GetUnsafeValue()), 216 command_buffer_id_.GetUnsafeValue()),
217 channel_->gpu_channel_manager()->shader_translator_cache(), 217 channel_->gpu_channel_manager()->shader_translator_cache(),
218 channel_->gpu_channel_manager()->framebuffer_completeness_cache(), NULL, 218 channel_->gpu_channel_manager()->framebuffer_completeness_cache(), NULL,
219 subscription_ref_set, pending_valuebuffer_state, 219 subscription_ref_set, pending_valuebuffer_state,
220 attrib_parser.bind_generates_resource); 220 attrib_parser.bind_generates_resource);
221 } 221 }
222 222
223 // Virtualize PreferIntegratedGpu contexts by default on OS X to prevent 223 // Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
(...skipping 12 matching lines...)
236 use_virtualized_gl_context_ |= mailbox_manager->UsesSync(); 236 use_virtualized_gl_context_ |= mailbox_manager->UsesSync();
237 237
238 #if defined(OS_ANDROID) 238 #if defined(OS_ANDROID)
239 if (attrib_parser.red_size <= 5 && 239 if (attrib_parser.red_size <= 5 &&
240 attrib_parser.green_size <= 6 && 240 attrib_parser.green_size <= 6 &&
241 attrib_parser.blue_size <= 5 && 241 attrib_parser.blue_size <= 5 &&
242 attrib_parser.alpha_size == 0) 242 attrib_parser.alpha_size == 0)
243 surface_format_ = gfx::GLSurface::SURFACE_RGB565; 243 surface_format_ = gfx::GLSurface::SURFACE_RGB565;
244 gfx::GLSurface* defaultOffscreenSurface = 244 gfx::GLSurface* defaultOffscreenSurface =
245 channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(); 245 channel_->gpu_channel_manager()->GetDefaultOffscreenSurface();
246 bool is_onscreen = (surface_handle_ != gpu::kNullSurfaceHandle); 246 bool is_onscreen = (surface_handle_ != kNullSurfaceHandle);
247 if (surface_format_ != defaultOffscreenSurface->GetFormat() && is_onscreen) 247 if (surface_format_ != defaultOffscreenSurface->GetFormat() && is_onscreen)
248 use_virtualized_gl_context_ = false; 248 use_virtualized_gl_context_ = false;
249 #endif 249 #endif
250 250
251 if ((surface_handle_ == gpu::kNullSurfaceHandle) && initial_size_.IsEmpty()) { 251 if ((surface_handle_ == kNullSurfaceHandle) && initial_size_.IsEmpty()) {
252 // If we're an offscreen surface with zero width and/or height, set to a 252 // If we're an offscreen surface with zero width and/or height, set to a
253 // non-zero size so that we have a complete framebuffer for operations like 253 // non-zero size so that we have a complete framebuffer for operations like
254 // glClear. 254 // glClear.
255 initial_size_ = gfx::Size(1, 1); 255 initial_size_ = gfx::Size(1, 1);
256 } 256 }
257 } 257 }
258 258
259 GpuCommandBufferStub::~GpuCommandBufferStub() { 259 GpuCommandBufferStub::~GpuCommandBufferStub() {
260 Destroy(); 260 Destroy();
261 } 261 }
(...skipping 122 matching lines...)
384 384
385 executor_->ProcessPendingQueries(); 385 executor_->ProcessPendingQueries();
386 } 386 }
387 387
388 ScheduleDelayedWork( 388 ScheduleDelayedWork(
389 base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs)); 389 base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs));
390 } 390 }
391 391
392 bool GpuCommandBufferStub::HasUnprocessedCommands() { 392 bool GpuCommandBufferStub::HasUnprocessedCommands() {
393 if (command_buffer_) { 393 if (command_buffer_) {
394 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); 394 CommandBuffer::State state = command_buffer_->GetLastState();
395 return command_buffer_->GetPutOffset() != state.get_offset && 395 return command_buffer_->GetPutOffset() != state.get_offset &&
396 !gpu::error::IsError(state.error); 396 !error::IsError(state.error);
397 } 397 }
398 return false; 398 return false;
399 } 399 }
400 400
401 void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) { 401 void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
402 bool has_more_work = executor_.get() && (executor_->HasPendingQueries() || 402 bool has_more_work = executor_.get() && (executor_->HasPendingQueries() ||
403 executor_->HasMoreIdleWork()); 403 executor_->HasMoreIdleWork());
404 if (!has_more_work) { 404 if (!has_more_work) {
405 last_idle_time_ = base::TimeTicks(); 405 last_idle_time_ = base::TimeTicks();
406 return; 406 return;
(...skipping 29 matching lines...)
436 task_runner_->PostDelayedTask( 436 task_runner_->PostDelayedTask(
437 FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()), 437 FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
438 delay); 438 delay);
439 } 439 }
440 440
441 bool GpuCommandBufferStub::MakeCurrent() { 441 bool GpuCommandBufferStub::MakeCurrent() {
442 if (decoder_->MakeCurrent()) 442 if (decoder_->MakeCurrent())
443 return true; 443 return true;
444 DLOG(ERROR) << "Context lost because MakeCurrent failed."; 444 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
445 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); 445 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
446 command_buffer_->SetParseError(gpu::error::kLostContext); 446 command_buffer_->SetParseError(error::kLostContext);
447 CheckContextLost(); 447 CheckContextLost();
448 return false; 448 return false;
449 } 449 }
450 450
451 void GpuCommandBufferStub::Destroy() { 451 void GpuCommandBufferStub::Destroy() {
452 if (wait_for_token_) { 452 if (wait_for_token_) {
453 Send(wait_for_token_->reply.release()); 453 Send(wait_for_token_->reply.release());
454 wait_for_token_.reset(); 454 wait_for_token_.reset();
455 } 455 }
456 if (wait_for_get_offset_) { 456 if (wait_for_get_offset_) {
457 Send(wait_for_get_offset_->reply.release()); 457 Send(wait_for_get_offset_->reply.release());
458 wait_for_get_offset_.reset(); 458 wait_for_get_offset_.reset();
459 } 459 }
460 460
461 if (initialized_) { 461 if (initialized_) {
462 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); 462 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
463 if ((surface_handle_ == gpu::kNullSurfaceHandle) && !active_url_.is_empty()) 463 if ((surface_handle_ == kNullSurfaceHandle) && !active_url_.is_empty())
464 gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_); 464 gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_);
465 } 465 }
466 466
467 if (decoder_) 467 if (decoder_)
468 decoder_->set_engine(NULL); 468 decoder_->set_engine(NULL);
469 469
470 // The scheduler has raw references to the decoder and the command buffer so 470 // The scheduler has raw references to the decoder and the command buffer so
471 // destroy it before those. 471 // destroy it before those.
472 executor_.reset(); 472 executor_.reset();
473 473
(...skipping 16 matching lines...)
490 490
491 command_buffer_.reset(); 491 command_buffer_.reset();
492 492
493 // Remove this after crbug.com/248395 is sorted out. 493 // Remove this after crbug.com/248395 is sorted out.
494 surface_ = NULL; 494 surface_ = NULL;
495 } 495 }
496 496
497 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) { 497 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
498 Destroy(); 498 Destroy();
499 GpuCommandBufferMsg_Initialize::WriteReplyParams( 499 GpuCommandBufferMsg_Initialize::WriteReplyParams(
500 reply_message, false, gpu::Capabilities()); 500 reply_message, false, Capabilities());
501 Send(reply_message); 501 Send(reply_message);
502 } 502 }
503 503
504 scoped_refptr<gfx::GLSurface> GpuCommandBufferStub::CreateSurface() { 504 scoped_refptr<gfx::GLSurface> GpuCommandBufferStub::CreateSurface() {
505 GpuChannelManager* manager = channel_->gpu_channel_manager(); 505 GpuChannelManager* manager = channel_->gpu_channel_manager();
506 scoped_refptr<gfx::GLSurface> surface; 506 scoped_refptr<gfx::GLSurface> surface;
507 if (surface_handle_ != gpu::kNullSurfaceHandle) { 507 if (surface_handle_ != kNullSurfaceHandle) {
508 surface = ImageTransportSurface::CreateNativeSurface( 508 surface = ImageTransportSurface::CreateNativeSurface(
509 manager, this, surface_handle_, surface_format_); 509 manager, this, surface_handle_, surface_format_);
510 if (!surface || !surface->Initialize(surface_format_)) 510 if (!surface || !surface->Initialize(surface_format_))
511 return nullptr; 511 return nullptr;
512 } else { 512 } else {
513 surface = manager->GetDefaultOffscreenSurface(); 513 surface = manager->GetDefaultOffscreenSurface();
514 } 514 }
515 return surface; 515 return surface;
516 } 516 }
517 517
518 void GpuCommandBufferStub::OnInitialize( 518 void GpuCommandBufferStub::OnInitialize(
519 base::SharedMemoryHandle shared_state_handle, 519 base::SharedMemoryHandle shared_state_handle,
520 IPC::Message* reply_message) { 520 IPC::Message* reply_message) {
521 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize"); 521 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
522 DCHECK(!command_buffer_.get()); 522 DCHECK(!command_buffer_.get());
523 523
524 scoped_ptr<base::SharedMemory> shared_state_shm( 524 scoped_ptr<base::SharedMemory> shared_state_shm(
525 new base::SharedMemory(shared_state_handle, false)); 525 new base::SharedMemory(shared_state_handle, false));
526 526
527 command_buffer_.reset(new gpu::CommandBufferService( 527 command_buffer_.reset(new CommandBufferService(
528 context_group_->transfer_buffer_manager())); 528 context_group_->transfer_buffer_manager()));
529 529
530 bool result = command_buffer_->Initialize(); 530 bool result = command_buffer_->Initialize();
531 DCHECK(result); 531 DCHECK(result);
532 532
533 GpuChannelManager* manager = channel_->gpu_channel_manager(); 533 GpuChannelManager* manager = channel_->gpu_channel_manager();
534 DCHECK(manager); 534 DCHECK(manager);
535 535
536 decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get())); 536 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
537 executor_.reset(new gpu::CommandExecutor(command_buffer_.get(), 537 executor_.reset(new CommandExecutor(command_buffer_.get(),
538 decoder_.get(), decoder_.get())); 538 decoder_.get(), decoder_.get()));
539 sync_point_client_ = sync_point_manager_->CreateSyncPointClient( 539 sync_point_client_ = sync_point_manager_->CreateSyncPointClient(
540 channel_->GetSyncPointOrderData(stream_id_), 540 channel_->GetSyncPointOrderData(stream_id_),
541 gpu::CommandBufferNamespace::GPU_IO, command_buffer_id_); 541 CommandBufferNamespace::GPU_IO, command_buffer_id_);
542 542
543 if (preemption_flag_.get()) 543 if (preemption_flag_.get())
544 executor_->SetPreemptByFlag(preemption_flag_); 544 executor_->SetPreemptByFlag(preemption_flag_);
545 545
546 decoder_->set_engine(executor_.get()); 546 decoder_->set_engine(executor_.get());
547 547
548 surface_ = CreateSurface(); 548 surface_ = CreateSurface();
549 if (!surface_.get()) { 549 if (!surface_.get()) {
550 DLOG(ERROR) << "Failed to create surface."; 550 DLOG(ERROR) << "Failed to create surface.";
551 OnInitializeFailed(reply_message); 551 OnInitializeFailed(reply_message);
(...skipping 11 matching lines...)
563 gpu_preference_); 563 gpu_preference_);
564 if (!context.get()) { 564 if (!context.get()) {
565 DLOG(ERROR) << "Failed to create shared context for virtualization."; 565 DLOG(ERROR) << "Failed to create shared context for virtualization.";
566 OnInitializeFailed(reply_message); 566 OnInitializeFailed(reply_message);
567 return; 567 return;
568 } 568 }
569 channel_->share_group()->SetSharedContext(context.get()); 569 channel_->share_group()->SetSharedContext(context.get());
570 } 570 }
571 // This should be a non-virtual GL context. 571 // This should be a non-virtual GL context.
572 DCHECK(context->GetHandle()); 572 DCHECK(context->GetHandle());
573 context = new gpu::GLContextVirtual( 573 context = new GLContextVirtual(
574 share_group, context.get(), decoder_->AsWeakPtr()); 574 share_group, context.get(), decoder_->AsWeakPtr());
575 if (!context->Initialize(surface_.get(), gpu_preference_)) { 575 if (!context->Initialize(surface_.get(), gpu_preference_)) {
576 // The real context created above for the default offscreen surface 576 // The real context created above for the default offscreen surface
577 // might not be compatible with this surface. 577 // might not be compatible with this surface.
578 context = NULL; 578 context = NULL;
579 579
580 DLOG(ERROR) << "Failed to initialize virtual GL context."; 580 DLOG(ERROR) << "Failed to initialize virtual GL context.";
581 OnInitializeFailed(reply_message); 581 OnInitializeFailed(reply_message);
582 return; 582 return;
583 } 583 }
584 } 584 }
585 if (!context.get()) { 585 if (!context.get()) {
586 context = gfx::GLContext::CreateGLContext( 586 context = gfx::GLContext::CreateGLContext(
587 share_group, surface_.get(), gpu_preference_); 587 share_group, surface_.get(), gpu_preference_);
588 } 588 }
589 if (!context.get()) { 589 if (!context.get()) {
590 DLOG(ERROR) << "Failed to create context."; 590 DLOG(ERROR) << "Failed to create context.";
591 OnInitializeFailed(reply_message); 591 OnInitializeFailed(reply_message);
592 return; 592 return;
593 } 593 }
594 594
595 if (!context->MakeCurrent(surface_.get())) { 595 if (!context->MakeCurrent(surface_.get())) {
596 LOG(ERROR) << "Failed to make context current."; 596 LOG(ERROR) << "Failed to make context current.";
597 OnInitializeFailed(reply_message); 597 OnInitializeFailed(reply_message);
598 return; 598 return;
599 } 599 }
600 600
601 if (!context->GetGLStateRestorer()) { 601 if (!context->GetGLStateRestorer()) {
602 context->SetGLStateRestorer( 602 context->SetGLStateRestorer(
603 new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr())); 603 new GLStateRestorerImpl(decoder_->AsWeakPtr()));
604 } 604 }
605 605
606 if (!context_group_->has_program_cache() && 606 if (!context_group_->has_program_cache() &&
607 !context_group_->feature_info()->workarounds().disable_program_cache) { 607 !context_group_->feature_info()->workarounds().disable_program_cache) {
608 context_group_->set_program_cache( 608 context_group_->set_program_cache(
609 channel_->gpu_channel_manager()->program_cache()); 609 channel_->gpu_channel_manager()->program_cache());
610 } 610 }
611 611
612 // Initialize the decoder with either the view or pbuffer GLContext. 612 // Initialize the decoder with either the view or pbuffer GLContext.
613 bool offscreen = (surface_handle_ == gpu::kNullSurfaceHandle); 613 bool offscreen = (surface_handle_ == kNullSurfaceHandle);
614 if (!decoder_->Initialize(surface_, context, offscreen, initial_size_, 614 if (!decoder_->Initialize(surface_, context, offscreen, initial_size_,
615 disallowed_features_, requested_attribs_)) { 615 disallowed_features_, requested_attribs_)) {
616 DLOG(ERROR) << "Failed to initialize decoder."; 616 DLOG(ERROR) << "Failed to initialize decoder.";
617 OnInitializeFailed(reply_message); 617 OnInitializeFailed(reply_message);
618 return; 618 return;
619 } 619 }
620 620
621 if (channel_->gpu_channel_manager()-> 621 if (channel_->gpu_channel_manager()->
622 gpu_preferences().enable_gpu_service_logging) { 622 gpu_preferences().enable_gpu_service_logging) {
623 decoder_->set_log_commands(true); 623 decoder_->set_log_commands(true);
624 } 624 }
625 625
626 decoder_->GetLogger()->SetMsgCallback( 626 decoder_->GetLogger()->SetMsgCallback(
627 base::Bind(&GpuCommandBufferStub::SendConsoleMessage, 627 base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
628 base::Unretained(this))); 628 base::Unretained(this)));
629 decoder_->SetShaderCacheCallback( 629 decoder_->SetShaderCacheCallback(
630 base::Bind(&GpuCommandBufferStub::SendCachedShader, 630 base::Bind(&GpuCommandBufferStub::SendCachedShader,
631 base::Unretained(this))); 631 base::Unretained(this)));
632 decoder_->SetFenceSyncReleaseCallback(base::Bind( 632 decoder_->SetFenceSyncReleaseCallback(base::Bind(
633 &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this))); 633 &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
634 decoder_->SetWaitFenceSyncCallback(base::Bind( 634 decoder_->SetWaitFenceSyncCallback(base::Bind(
635 &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this))); 635 &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this)));
636 636
637 command_buffer_->SetPutOffsetChangeCallback( 637 command_buffer_->SetPutOffsetChangeCallback(
638 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this))); 638 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
639 command_buffer_->SetGetBufferChangeCallback(base::Bind( 639 command_buffer_->SetGetBufferChangeCallback(base::Bind(
640 &gpu::CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); 640 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
641 command_buffer_->SetParseErrorCallback( 641 command_buffer_->SetParseErrorCallback(
642 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this))); 642 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
643 executor_->SetSchedulingChangedCallback(base::Bind( 643 executor_->SetSchedulingChangedCallback(base::Bind(
644 &GpuCommandBufferStub::OnSchedulingChanged, base::Unretained(this))); 644 &GpuCommandBufferStub::OnSchedulingChanged, base::Unretained(this)));
645 645
646 if (watchdog_) { 646 if (watchdog_) {
647 executor_->SetCommandProcessedCallback(base::Bind( 647 executor_->SetCommandProcessedCallback(base::Bind(
648 &GpuCommandBufferStub::OnCommandProcessed, base::Unretained(this))); 648 &GpuCommandBufferStub::OnCommandProcessed, base::Unretained(this)));
649 } 649 }
650 650
651 const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState); 651 const size_t kSharedStateSize = sizeof(CommandBufferSharedState);
652 if (!shared_state_shm->Map(kSharedStateSize)) { 652 if (!shared_state_shm->Map(kSharedStateSize)) {
653 DLOG(ERROR) << "Failed to map shared state buffer."; 653 DLOG(ERROR) << "Failed to map shared state buffer.";
654 OnInitializeFailed(reply_message); 654 OnInitializeFailed(reply_message);
655 return; 655 return;
656 } 656 }
657 command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory( 657 command_buffer_->SetSharedStateBuffer(MakeBackingFromSharedMemory(
658 std::move(shared_state_shm), kSharedStateSize)); 658 std::move(shared_state_shm), kSharedStateSize));
659 659
660 gpu::Capabilities capabilities = decoder_->GetCapabilities(); 660 Capabilities capabilities = decoder_->GetCapabilities();
661 661
662 GpuCommandBufferMsg_Initialize::WriteReplyParams( 662 GpuCommandBufferMsg_Initialize::WriteReplyParams(
663 reply_message, true, capabilities); 663 reply_message, true, capabilities);
664 Send(reply_message); 664 Send(reply_message);
665 665
666 if ((surface_handle_ == gpu::kNullSurfaceHandle) && !active_url_.is_empty()) 666 if ((surface_handle_ == kNullSurfaceHandle) && !active_url_.is_empty())
667 manager->delegate()->DidCreateOffscreenContext(active_url_); 667 manager->delegate()->DidCreateOffscreenContext(active_url_);
668 668
669 initialized_ = true; 669 initialized_ = true;
670 } 670 }
671 671
672 void GpuCommandBufferStub::OnCreateStreamTexture(uint32_t texture_id, 672 void GpuCommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
673 int32_t stream_id, 673 int32_t stream_id,
674 bool* succeeded) { 674 bool* succeeded) {
675 #if defined(OS_ANDROID) 675 #if defined(OS_ANDROID)
676 *succeeded = StreamTexture::Create(this, texture_id, stream_id); 676 *succeeded = StreamTexture::Create(this, texture_id, stream_id);
(...skipping 20 matching lines...)
697 } 697 }
698 698
699 void GpuCommandBufferStub::OnSetGetBuffer(int32_t shm_id, 699 void GpuCommandBufferStub::OnSetGetBuffer(int32_t shm_id,
700 IPC::Message* reply_message) { 700 IPC::Message* reply_message) {
701 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer"); 701 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
702 if (command_buffer_) 702 if (command_buffer_)
703 command_buffer_->SetGetBuffer(shm_id); 703 command_buffer_->SetGetBuffer(shm_id);
704 Send(reply_message); 704 Send(reply_message);
705 } 705 }
706 706
707 void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) { 707 void GpuCommandBufferStub::OnProduceFrontBuffer(const Mailbox& mailbox) {
708 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer"); 708 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
709 if (!decoder_) { 709 if (!decoder_) {
710 LOG(ERROR) << "Can't produce front buffer before initialization."; 710 LOG(ERROR) << "Can't produce front buffer before initialization.";
711 return; 711 return;
712 } 712 }
713 713
714 decoder_->ProduceFrontBuffer(mailbox); 714 decoder_->ProduceFrontBuffer(mailbox);
715 } 715 }
716 716
717 void GpuCommandBufferStub::OnParseError() { 717 void GpuCommandBufferStub::OnParseError() {
718 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError"); 718 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
719 DCHECK(command_buffer_.get()); 719 DCHECK(command_buffer_.get());
720 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); 720 CommandBuffer::State state = command_buffer_->GetLastState();
721 IPC::Message* msg = new GpuCommandBufferMsg_Destroyed( 721 IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
722 route_id_, state.context_lost_reason, state.error); 722 route_id_, state.context_lost_reason, state.error);
723 msg->set_unblock(true); 723 msg->set_unblock(true);
724 Send(msg); 724 Send(msg);
725 725
726 // Tell the browser about this context loss as well, so it can 726 // Tell the browser about this context loss as well, so it can
727 // determine whether client APIs like WebGL need to be immediately 727 // determine whether client APIs like WebGL need to be immediately
728 // blocked from automatically running. 728 // blocked from automatically running.
729 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); 729 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
730 gpu_channel_manager->delegate()->DidLoseContext( 730 gpu_channel_manager->delegate()->DidLoseContext(
731 (surface_handle_ == gpu::kNullSurfaceHandle), state.context_lost_reason, 731 (surface_handle_ == kNullSurfaceHandle), state.context_lost_reason,
732 active_url_); 732 active_url_);
733 733
734 CheckContextLost(); 734 CheckContextLost();
735 } 735 }
736 736
737 void GpuCommandBufferStub::OnSchedulingChanged(bool scheduled) { 737 void GpuCommandBufferStub::OnSchedulingChanged(bool scheduled) {
738 TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnSchedulingChanged", "scheduled", 738 TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnSchedulingChanged", "scheduled",
739 scheduled); 739 scheduled);
740 channel_->OnStreamRescheduled(stream_id_, scheduled); 740 channel_->OnStreamRescheduled(stream_id_, scheduled);
741 } 741 }
(...skipping 22 matching lines...)
764 LOG(ERROR) 764 LOG(ERROR)
765 << "Got WaitForGetOffset command while currently waiting for offset."; 765 << "Got WaitForGetOffset command while currently waiting for offset.";
766 } 766 }
767 wait_for_get_offset_ = 767 wait_for_get_offset_ =
768 make_scoped_ptr(new WaitForCommandState(start, end, reply_message)); 768 make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
769 CheckCompleteWaits(); 769 CheckCompleteWaits();
770 } 770 }
771 771
772 void GpuCommandBufferStub::CheckCompleteWaits() { 772 void GpuCommandBufferStub::CheckCompleteWaits() {
773 if (wait_for_token_ || wait_for_get_offset_) { 773 if (wait_for_token_ || wait_for_get_offset_) {
774 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); 774 CommandBuffer::State state = command_buffer_->GetLastState();
775 if (wait_for_token_ && 775 if (wait_for_token_ &&
776 (gpu::CommandBuffer::InRange( 776 (CommandBuffer::InRange(
777 wait_for_token_->start, wait_for_token_->end, state.token) || 777 wait_for_token_->start, wait_for_token_->end, state.token) ||
778 state.error != gpu::error::kNoError)) { 778 state.error != error::kNoError)) {
779 ReportState(); 779 ReportState();
780 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams( 780 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
781 wait_for_token_->reply.get(), state); 781 wait_for_token_->reply.get(), state);
782 Send(wait_for_token_->reply.release()); 782 Send(wait_for_token_->reply.release());
783 wait_for_token_.reset(); 783 wait_for_token_.reset();
784 } 784 }
785 if (wait_for_get_offset_ && 785 if (wait_for_get_offset_ &&
786 (gpu::CommandBuffer::InRange(wait_for_get_offset_->start, 786 (CommandBuffer::InRange(wait_for_get_offset_->start,
787 wait_for_get_offset_->end, 787 wait_for_get_offset_->end,
788 state.get_offset) || 788 state.get_offset) ||
789 state.error != gpu::error::kNoError)) { 789 state.error != error::kNoError)) {
790 ReportState(); 790 ReportState();
791 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams( 791 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
792 wait_for_get_offset_->reply.get(), state); 792 wait_for_get_offset_->reply.get(), state);
793 Send(wait_for_get_offset_->reply.release()); 793 Send(wait_for_get_offset_->reply.release());
794 wait_for_get_offset_.reset(); 794 wait_for_get_offset_.reset();
795 } 795 }
796 } 796 }
797 } 797 }
798 798
799 void GpuCommandBufferStub::OnAsyncFlush( 799 void GpuCommandBufferStub::OnAsyncFlush(
(...skipping 10 matching lines...)
810 << "Received a Flush message out-of-order"; 810 << "Received a Flush message out-of-order";
811 811
812 if (flush_count > last_flush_count_ && 812 if (flush_count > last_flush_count_ &&
813 ui::LatencyInfo::Verify(latency_info, 813 ui::LatencyInfo::Verify(latency_info,
814 "GpuCommandBufferStub::OnAsyncFlush") && 814 "GpuCommandBufferStub::OnAsyncFlush") &&
815 !latency_info_callback_.is_null()) { 815 !latency_info_callback_.is_null()) {
816 latency_info_callback_.Run(latency_info); 816 latency_info_callback_.Run(latency_info);
817 } 817 }
818 818
819 last_flush_count_ = flush_count; 819 last_flush_count_ = flush_count;
820 gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState(); 820 CommandBuffer::State pre_state = command_buffer_->GetLastState();
821 command_buffer_->Flush(put_offset); 821 command_buffer_->Flush(put_offset);
822 gpu::CommandBuffer::State post_state = command_buffer_->GetLastState(); 822 CommandBuffer::State post_state = command_buffer_->GetLastState();
823 823
824 if (pre_state.get_offset != post_state.get_offset) 824 if (pre_state.get_offset != post_state.get_offset)
825 ReportState(); 825 ReportState();
826 826
827 #if defined(OS_ANDROID) 827 #if defined(OS_ANDROID)
828 GpuChannelManager* manager = channel_->gpu_channel_manager(); 828 GpuChannelManager* manager = channel_->gpu_channel_manager();
829 manager->DidAccessGpu(); 829 manager->DidAccessGpu();
830 #endif 830 #endif
831 } 831 }
832 832
833 void GpuCommandBufferStub::OnRegisterTransferBuffer( 833 void GpuCommandBufferStub::OnRegisterTransferBuffer(
834 int32_t id, 834 int32_t id,
835 base::SharedMemoryHandle transfer_buffer, 835 base::SharedMemoryHandle transfer_buffer,
836 uint32_t size) { 836 uint32_t size) {
837 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer"); 837 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
838 838
839 // Take ownership of the memory and map it into this process. 839 // Take ownership of the memory and map it into this process.
840 // This validates the size. 840 // This validates the size.
841 scoped_ptr<base::SharedMemory> shared_memory( 841 scoped_ptr<base::SharedMemory> shared_memory(
842 new base::SharedMemory(transfer_buffer, false)); 842 new base::SharedMemory(transfer_buffer, false));
843 if (!shared_memory->Map(size)) { 843 if (!shared_memory->Map(size)) {
844 DVLOG(0) << "Failed to map shared memory."; 844 DVLOG(0) << "Failed to map shared memory.";
845 return; 845 return;
846 } 846 }
847 847
848 if (command_buffer_) { 848 if (command_buffer_) {
849 command_buffer_->RegisterTransferBuffer( 849 command_buffer_->RegisterTransferBuffer(
850 id, gpu::MakeBackingFromSharedMemory(std::move(shared_memory), size)); 850 id, MakeBackingFromSharedMemory(std::move(shared_memory), size));
851 } 851 }
852 } 852 }
853 853
854 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32_t id) { 854 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32_t id) {
855 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer"); 855 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
856 856
857 if (command_buffer_) 857 if (command_buffer_)
858 command_buffer_->DestroyTransferBuffer(id); 858 command_buffer_->DestroyTransferBuffer(id);
859 } 859 }
860 860
861 void GpuCommandBufferStub::OnCommandProcessed() { 861 void GpuCommandBufferStub::OnCommandProcessed() {
862 if (watchdog_) 862 if (watchdog_)
863 watchdog_->CheckArmed(); 863 watchdog_->CheckArmed();
864 } 864 }
865 865
866 void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); } 866 void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
867 867
868 void GpuCommandBufferStub::PutChanged() { 868 void GpuCommandBufferStub::PutChanged() {
869 FastSetActiveURL(active_url_, active_url_hash_, channel_); 869 FastSetActiveURL(active_url_, active_url_hash_, channel_);
870 executor_->PutChanged(); 870 executor_->PutChanged();
871 } 871 }
872 872
873 void GpuCommandBufferStub::PullTextureUpdates( 873 void GpuCommandBufferStub::PullTextureUpdates(
874 gpu::CommandBufferNamespace namespace_id, 874 CommandBufferNamespace namespace_id,
875 gpu::CommandBufferId command_buffer_id, 875 CommandBufferId command_buffer_id,
876 uint32_t release) { 876 uint32_t release) {
877 gpu::gles2::MailboxManager* mailbox_manager = 877 gles2::MailboxManager* mailbox_manager =
878 context_group_->mailbox_manager(); 878 context_group_->mailbox_manager();
879 if (mailbox_manager->UsesSync() && MakeCurrent()) { 879 if (mailbox_manager->UsesSync() && MakeCurrent()) {
880 gpu::SyncToken sync_token(namespace_id, 0, command_buffer_id, release); 880 SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
881 mailbox_manager->PullTextureUpdates(sync_token); 881 mailbox_manager->PullTextureUpdates(sync_token);
882 } 882 }
883 } 883 }
884 884
885 void GpuCommandBufferStub::OnSignalSyncToken(const gpu::SyncToken& sync_token, 885 void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
886 uint32_t id) { 886 uint32_t id) {
887 scoped_refptr<gpu::SyncPointClientState> release_state = 887 scoped_refptr<SyncPointClientState> release_state =
888 sync_point_manager_->GetSyncPointClientState( 888 sync_point_manager_->GetSyncPointClientState(
889 sync_token.namespace_id(), sync_token.command_buffer_id()); 889 sync_token.namespace_id(), sync_token.command_buffer_id());
890 890
891 if (release_state) { 891 if (release_state) {
892 sync_point_client_->Wait(release_state.get(), sync_token.release_count(), 892 sync_point_client_->Wait(release_state.get(), sync_token.release_count(),
893 base::Bind(&GpuCommandBufferStub::OnSignalAck, 893 base::Bind(&GpuCommandBufferStub::OnSignalAck,
894 this->AsWeakPtr(), id)); 894 this->AsWeakPtr(), id));
895 } else { 895 } else {
896 OnSignalAck(id); 896 OnSignalAck(id);
897 } 897 }
898 } 898 }
899 899
900 void GpuCommandBufferStub::OnSignalAck(uint32_t id) { 900 void GpuCommandBufferStub::OnSignalAck(uint32_t id) {
901 Send(new GpuCommandBufferMsg_SignalAck(route_id_, id)); 901 Send(new GpuCommandBufferMsg_SignalAck(route_id_, id));
902 } 902 }
903 903
904 void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) { 904 void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
905 if (decoder_) { 905 if (decoder_) {
906 gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager(); 906 gles2::QueryManager* query_manager = decoder_->GetQueryManager();
907 if (query_manager) { 907 if (query_manager) {
908 gpu::gles2::QueryManager::Query* query = 908 gles2::QueryManager::Query* query =
909 query_manager->GetQuery(query_id); 909 query_manager->GetQuery(query_id);
910 if (query) { 910 if (query) {
911 query->AddCallback( 911 query->AddCallback(
912 base::Bind(&GpuCommandBufferStub::OnSignalAck, 912 base::Bind(&GpuCommandBufferStub::OnSignalAck,
913 this->AsWeakPtr(), 913 this->AsWeakPtr(),
914 id)); 914 id));
915 return; 915 return;
916 } 916 }
917 } 917 }
918 } 918 }
919 // Something went wrong, run callback immediately. 919 // Something went wrong, run callback immediately.
920 OnSignalAck(id); 920 OnSignalAck(id);
921 } 921 }
922 922
923 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) { 923 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
924 if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) { 924 if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) {
925 DLOG(ERROR) << "Fence Sync has already been released."; 925 DLOG(ERROR) << "Fence Sync has already been released.";
926 return; 926 return;
927 } 927 }
928 928
929 gpu::gles2::MailboxManager* mailbox_manager = 929 gles2::MailboxManager* mailbox_manager =
930 context_group_->mailbox_manager(); 930 context_group_->mailbox_manager();
931 if (mailbox_manager->UsesSync() && MakeCurrent()) { 931 if (mailbox_manager->UsesSync() && MakeCurrent()) {
932 gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 932 SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0,
933 command_buffer_id_, release); 933 command_buffer_id_, release);
934 mailbox_manager->PushTextureUpdates(sync_token); 934 mailbox_manager->PushTextureUpdates(sync_token);
935 } 935 }
936 936
937 sync_point_client_->ReleaseFenceSync(release); 937 sync_point_client_->ReleaseFenceSync(release);
938 } 938 }
939 939
940 bool GpuCommandBufferStub::OnWaitFenceSync( 940 bool GpuCommandBufferStub::OnWaitFenceSync(
941 gpu::CommandBufferNamespace namespace_id, 941 CommandBufferNamespace namespace_id,
942 gpu::CommandBufferId command_buffer_id, 942 CommandBufferId command_buffer_id,
943 uint64_t release) { 943 uint64_t release) {
944 DCHECK(!waiting_for_sync_point_); 944 DCHECK(!waiting_for_sync_point_);
945 DCHECK(executor_->scheduled()); 945 DCHECK(executor_->scheduled());
946 946
947 scoped_refptr<gpu::SyncPointClientState> release_state = 947 scoped_refptr<SyncPointClientState> release_state =
948 sync_point_manager_->GetSyncPointClientState(namespace_id, 948 sync_point_manager_->GetSyncPointClientState(namespace_id,
949 command_buffer_id); 949 command_buffer_id);
950 950
951 if (!release_state) 951 if (!release_state)
952 return true; 952 return true;
953 953
954 if (release_state->IsFenceSyncReleased(release)) { 954 if (release_state->IsFenceSyncReleased(release)) {
955 PullTextureUpdates(namespace_id, command_buffer_id, release); 955 PullTextureUpdates(namespace_id, command_buffer_id, release);
956 return true; 956 return true;
957 } 957 }
958 958
959 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub", 959 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
960 this); 960 this);
961 waiting_for_sync_point_ = true; 961 waiting_for_sync_point_ = true;
962 sync_point_client_->WaitNonThreadSafe( 962 sync_point_client_->WaitNonThreadSafe(
963 release_state.get(), release, task_runner_, 963 release_state.get(), release, task_runner_,
964 base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted, 964 base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
965 this->AsWeakPtr(), namespace_id, command_buffer_id, release)); 965 this->AsWeakPtr(), namespace_id, command_buffer_id, release));
966 966
967 if (!waiting_for_sync_point_) 967 if (!waiting_for_sync_point_)
968 return true; 968 return true;
969 969
970 executor_->SetScheduled(false); 970 executor_->SetScheduled(false);
971 return false; 971 return false;
972 } 972 }
973 973
974 void GpuCommandBufferStub::OnWaitFenceSyncCompleted( 974 void GpuCommandBufferStub::OnWaitFenceSyncCompleted(
975 gpu::CommandBufferNamespace namespace_id, 975 CommandBufferNamespace namespace_id,
976 gpu::CommandBufferId command_buffer_id, 976 CommandBufferId command_buffer_id,
977 uint64_t release) { 977 uint64_t release) {
978 DCHECK(waiting_for_sync_point_); 978 DCHECK(waiting_for_sync_point_);
979 TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub", 979 TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
980 this); 980 this);
981 PullTextureUpdates(namespace_id, command_buffer_id, release); 981 PullTextureUpdates(namespace_id, command_buffer_id, release);
982 waiting_for_sync_point_ = false; 982 waiting_for_sync_point_ = false;
983 executor_->SetScheduled(true); 983 executor_->SetScheduled(true);
984 } 984 }
985 985
986 void GpuCommandBufferStub::OnCreateImage( 986 void GpuCommandBufferStub::OnCreateImage(
987 const GpuCommandBufferMsg_CreateImage_Params& params) { 987 const GpuCommandBufferMsg_CreateImage_Params& params) {
988 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage"); 988 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");
989 const int32_t id = params.id; 989 const int32_t id = params.id;
990 const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer; 990 const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer;
991 const gfx::Size& size = params.size; 991 const gfx::Size& size = params.size;
992 const gfx::BufferFormat& format = params.format; 992 const gfx::BufferFormat& format = params.format;
993 const uint32_t internalformat = params.internal_format; 993 const uint32_t internalformat = params.internal_format;
994 const uint64_t image_release_count = params.image_release_count; 994 const uint64_t image_release_count = params.image_release_count;
995 995
996 if (!decoder_) 996 if (!decoder_)
997 return; 997 return;
998 998
999 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 999 gles2::ImageManager* image_manager = decoder_->GetImageManager();
1000 DCHECK(image_manager); 1000 DCHECK(image_manager);
1001 if (image_manager->LookupImage(id)) { 1001 if (image_manager->LookupImage(id)) {
1002 LOG(ERROR) << "Image already exists with same ID."; 1002 LOG(ERROR) << "Image already exists with same ID.";
1003 return; 1003 return;
1004 } 1004 }
1005 1005
1006 if (!gpu::IsGpuMemoryBufferFormatSupported(format, 1006 if (!gpu::IsGpuMemoryBufferFormatSupported(format,
1007 decoder_->GetCapabilities())) { 1007 decoder_->GetCapabilities())) {
1008 LOG(ERROR) << "Format is not supported."; 1008 LOG(ERROR) << "Format is not supported.";
1009 return; 1009 return;
(...skipping 20 matching lines...)
1030 sync_point_client_->ReleaseFenceSync(image_release_count); 1030 sync_point_client_->ReleaseFenceSync(image_release_count);
1031 } 1031 }
1032 } 1032 }
1033 1033
1034 void GpuCommandBufferStub::OnDestroyImage(int32_t id) { 1034 void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
1035 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage"); 1035 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");
1036 1036
1037 if (!decoder_) 1037 if (!decoder_)
1038 return; 1038 return;
1039 1039
1040 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); 1040 gles2::ImageManager* image_manager = decoder_->GetImageManager();
1041 DCHECK(image_manager); 1041 DCHECK(image_manager);
1042 if (!image_manager->LookupImage(id)) { 1042 if (!image_manager->LookupImage(id)) {
1043 LOG(ERROR) << "Image with ID doesn't exist."; 1043 LOG(ERROR) << "Image with ID doesn't exist.";
1044 return; 1044 return;
1045 } 1045 }
1046 1046
1047 image_manager->RemoveImage(id); 1047 image_manager->RemoveImage(id);
1048 } 1048 }
1049 1049
1050 void GpuCommandBufferStub::SendConsoleMessage(int32_t id, 1050 void GpuCommandBufferStub::SendConsoleMessage(int32_t id,
(...skipping 15 matching lines...)
1066 void GpuCommandBufferStub::AddDestructionObserver( 1066 void GpuCommandBufferStub::AddDestructionObserver(
1067 DestructionObserver* observer) { 1067 DestructionObserver* observer) {
1068 destruction_observers_.AddObserver(observer); 1068 destruction_observers_.AddObserver(observer);
1069 } 1069 }
1070 1070
1071 void GpuCommandBufferStub::RemoveDestructionObserver( 1071 void GpuCommandBufferStub::RemoveDestructionObserver(
1072 DestructionObserver* observer) { 1072 DestructionObserver* observer) {
1073 destruction_observers_.RemoveObserver(observer); 1073 destruction_observers_.RemoveObserver(observer);
1074 } 1074 }
1075 1075
1076 const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const { 1076 const gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
1077 return context_group_->feature_info(); 1077 return context_group_->feature_info();
1078 } 1078 }
1079 1079
1080 gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const { 1080 gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
1081 return context_group_->memory_tracker(); 1081 return context_group_->memory_tracker();
1082 } 1082 }
1083 1083
1084 bool GpuCommandBufferStub::CheckContextLost() { 1084 bool GpuCommandBufferStub::CheckContextLost() {
1085 DCHECK(command_buffer_); 1085 DCHECK(command_buffer_);
1086 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); 1086 CommandBuffer::State state = command_buffer_->GetLastState();
1087 bool was_lost = state.error == gpu::error::kLostContext; 1087 bool was_lost = state.error == error::kLostContext;
1088 1088
1089 if (was_lost) { 1089 if (was_lost) {
1090 bool was_lost_by_robustness = 1090 bool was_lost_by_robustness =
1091 decoder_ && decoder_->WasContextLostByRobustnessExtension(); 1091 decoder_ && decoder_->WasContextLostByRobustnessExtension();
1092 1092
1093 // Work around issues with recovery by allowing a new GPU process to launch. 1093 // Work around issues with recovery by allowing a new GPU process to launch.
1094 if ((was_lost_by_robustness || 1094 if ((was_lost_by_robustness ||
1095 context_group_->feature_info()->workarounds().exit_on_context_lost) && 1095 context_group_->feature_info()->workarounds().exit_on_context_lost) &&
1096 !channel_->gpu_channel_manager()->gpu_preferences().single_process && 1096 !channel_->gpu_channel_manager()->gpu_preferences().single_process &&
1097 !channel_->gpu_channel_manager()->gpu_preferences().in_process_gpu) { 1097 !channel_->gpu_channel_manager()->gpu_preferences().in_process_gpu) {
(...skipping 12 matching lines...)
1110 channel_->LoseAllContexts(); 1110 channel_->LoseAllContexts();
1111 } 1111 }
1112 } 1112 }
1113 1113
1114 CheckCompleteWaits(); 1114 CheckCompleteWaits();
1115 return was_lost; 1115 return was_lost;
1116 } 1116 }
1117 1117
1118 void GpuCommandBufferStub::MarkContextLost() { 1118 void GpuCommandBufferStub::MarkContextLost() {
1119 if (!command_buffer_ || 1119 if (!command_buffer_ ||
1120 command_buffer_->GetLastState().error == gpu::error::kLostContext) 1120 command_buffer_->GetLastState().error == error::kLostContext)
1121 return; 1121 return;
1122 1122
1123 command_buffer_->SetContextLostReason(gpu::error::kUnknown); 1123 command_buffer_->SetContextLostReason(error::kUnknown);
1124 if (decoder_) 1124 if (decoder_)
1125 decoder_->MarkContextLost(gpu::error::kUnknown); 1125 decoder_->MarkContextLost(error::kUnknown);
1126 command_buffer_->SetParseError(gpu::error::kLostContext); 1126 command_buffer_->SetParseError(error::kLostContext);
1127 } 1127 }
1128 1128
1129 void GpuCommandBufferStub::SendSwapBuffersCompleted( 1129 void GpuCommandBufferStub::SendSwapBuffersCompleted(
1130 const std::vector<ui::LatencyInfo>& latency_info, 1130 const std::vector<ui::LatencyInfo>& latency_info,
1131 gfx::SwapResult result) { 1131 gfx::SwapResult result) {
1132 Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info, 1132 Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
1133 result)); 1133 result));
1134 } 1134 }
1135 1135
1136 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase, 1136 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
1137 base::TimeDelta interval) { 1137 base::TimeDelta interval) {
1138 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase, 1138 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
1139 interval)); 1139 interval));
1140 } 1140 }
1141 1141
1142 } // namespace content 1142 } // namespace gpu
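
Annotation (not part of the patch): the command buffer id constructed by GetCommandBufferID in this file packs the client channel id into the high 32 bits and the route id into the low 32 bits of a 64-bit value. A minimal standalone sketch of that layout, assuming non-negative ids (the helper names below are illustrative, not from the CL):

    #include <cassert>
    #include <cstdint>

    // Mirrors the packing in GetCommandBufferID: channel id in the high word,
    // route id in the low word.
    uint64_t PackCommandBufferId(int channel_id, int32_t route_id) {
      return (static_cast<uint64_t>(channel_id) << 32) |
             static_cast<uint32_t>(route_id);
    }

    int main() {
      const uint64_t id = PackCommandBufferId(7, 42);
      assert((id >> 32) == 7);           // channel id recovered from the high word
      assert((id & 0xffffffffu) == 42);  // route id recovered from the low word
      return 0;
    }
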