Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/service/gpu_channel.h" | 5 #include "gpu/ipc/service/gpu_channel.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #if defined(OS_WIN) | 9 #if defined(OS_WIN) |
| 10 #include <windows.h> | 10 #include <windows.h> |
| (...skipping 683 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 694 return channel_->Send(message); | 694 return channel_->Send(message); |
| 695 } | 695 } |
| 696 | 696 |
| 697 void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) { | 697 void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) { |
| 698 scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id); | 698 scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id); |
| 699 DCHECK(queue); | 699 DCHECK(queue); |
| 700 queue->OnRescheduled(scheduled); | 700 queue->OnRescheduled(scheduled); |
| 701 } | 701 } |
| 702 | 702 |
| 703 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) { | 703 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) { |
| 704 return stubs_.get(route_id); | 704 auto it = stubs_.find(route_id); |
| 705 if (it == stubs_.end()) | |
| 706 return nullptr; | |
| 707 | |
| 708 return it->second.get(); | |
| 705 } | 709 } |
| 706 | 710 |
| 707 void GpuChannel::LoseAllContexts() { | 711 void GpuChannel::LoseAllContexts() { |
| 708 gpu_channel_manager_->LoseAllContexts(); | 712 gpu_channel_manager_->LoseAllContexts(); |
| 709 } | 713 } |
| 710 | 714 |
| 711 void GpuChannel::MarkAllContextsLost() { | 715 void GpuChannel::MarkAllContextsLost() { |
| 712 for (auto& kv : stubs_) | 716 for (auto& kv : stubs_) |
| 713 kv.second->MarkContextLost(); | 717 kv.second->MarkContextLost(); |
| 714 } | 718 } |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 765 | 769 |
| 766 void GpuChannel::HandleMessage( | 770 void GpuChannel::HandleMessage( |
| 767 const scoped_refptr<GpuChannelMessageQueue>& message_queue) { | 771 const scoped_refptr<GpuChannelMessageQueue>& message_queue) { |
| 768 const GpuChannelMessage* channel_msg = | 772 const GpuChannelMessage* channel_msg = |
| 769 message_queue->BeginMessageProcessing(); | 773 message_queue->BeginMessageProcessing(); |
| 770 if (!channel_msg) | 774 if (!channel_msg) |
| 771 return; | 775 return; |
| 772 | 776 |
| 773 const IPC::Message& msg = channel_msg->message; | 777 const IPC::Message& msg = channel_msg->message; |
| 774 int32_t routing_id = msg.routing_id(); | 778 int32_t routing_id = msg.routing_id(); |
| 775 GpuCommandBufferStub* stub = stubs_.get(routing_id); | 779 GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id); |
| 776 | 780 |
| 777 DCHECK(!stub || stub->IsScheduled()); | 781 DCHECK(!stub || stub->IsScheduled()); |
| 778 | 782 |
| 779 DVLOG(1) << "received message @" << &msg << " on channel @" << this | 783 DVLOG(1) << "received message @" << &msg << " on channel @" << this |
| 780 << " with type " << msg.type(); | 784 << " with type " << msg.type(); |
| 781 | 785 |
| 782 HandleMessageHelper(msg); | 786 HandleMessageHelper(msg); |
| 783 | 787 |
| 784 // If we get descheduled or yield while processing a message. | 788 // If we get descheduled or yield while processing a message. |
| 785 if ((stub && stub->HasUnprocessedCommands()) || | 789 if ((stub && stub->HasUnprocessedCommands()) || |
| (...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 866 DCHECK(streams_.find(stream_id) != streams_.end()); | 870 DCHECK(streams_.find(stream_id) != streams_.end()); |
| 867 routes_to_streams_.erase(route_id); | 871 routes_to_streams_.erase(route_id); |
| 868 streams_to_num_routes_[stream_id]--; | 872 streams_to_num_routes_[stream_id]--; |
| 869 filter_->RemoveRoute(route_id); | 873 filter_->RemoveRoute(route_id); |
| 870 DestroyStreamIfNecessary(streams_[stream_id]); | 874 DestroyStreamIfNecessary(streams_[stream_id]); |
| 871 } | 875 } |
| 872 | 876 |
| 873 #if defined(OS_ANDROID) | 877 #if defined(OS_ANDROID) |
| 874 const GpuCommandBufferStub* GpuChannel::GetOneStub() const { | 878 const GpuCommandBufferStub* GpuChannel::GetOneStub() const { |
| 875 for (const auto& kv : stubs_) { | 879 for (const auto& kv : stubs_) { |
| 876 const GpuCommandBufferStub* stub = kv.second; | 880 const GpuCommandBufferStub* stub = kv.second.get(); |
| 877 if (stub->decoder() && !stub->decoder()->WasContextLost()) | 881 if (stub->decoder() && !stub->decoder()->WasContextLost()) |
| 878 return stub; | 882 return stub; |
| 879 } | 883 } |
| 880 return nullptr; | 884 return nullptr; |
| 881 } | 885 } |
| 882 #endif | 886 #endif |
| 883 | 887 |
| 884 void GpuChannel::OnCreateCommandBuffer( | 888 void GpuChannel::OnCreateCommandBuffer( |
| 885 const GPUCreateCommandBufferConfig& init_params, | 889 const GPUCreateCommandBufferConfig& init_params, |
| 886 int32_t route_id, | 890 int32_t route_id, |
| 887 base::SharedMemoryHandle shared_state_handle, | 891 base::SharedMemoryHandle shared_state_handle, |
| 888 bool* result, | 892 bool* result, |
| 889 gpu::Capabilities* capabilities) { | 893 gpu::Capabilities* capabilities) { |
| 890 TRACE_EVENT2("gpu", "GpuChannel::OnCreateCommandBuffer", "route_id", route_id, | 894 TRACE_EVENT2("gpu", "GpuChannel::OnCreateCommandBuffer", "route_id", route_id, |
| 891 "offscreen", (init_params.surface_handle == kNullSurfaceHandle)); | 895 "offscreen", (init_params.surface_handle == kNullSurfaceHandle)); |
| 892 std::unique_ptr<base::SharedMemory> shared_state_shm( | 896 std::unique_ptr<base::SharedMemory> shared_state_shm( |
| 893 new base::SharedMemory(shared_state_handle, false)); | 897 new base::SharedMemory(shared_state_handle, false)); |
| 894 std::unique_ptr<GpuCommandBufferStub> stub = | 898 std::unique_ptr<GpuCommandBufferStub> stub = |
| 895 CreateCommandBuffer(init_params, route_id, std::move(shared_state_shm)); | 899 CreateCommandBuffer(init_params, route_id, std::move(shared_state_shm)); |
| 896 if (stub) { | 900 if (stub) { |
| 897 *result = true; | 901 *result = true; |
| 898 *capabilities = stub->decoder()->GetCapabilities(); | 902 *capabilities = stub->decoder()->GetCapabilities(); |
| 899 stubs_.set(route_id, std::move(stub)); | 903 stubs_[route_id] = std::move(stub); |
|
Ken Russell (switch to Gerrit)
2016/12/29 05:41:15
I confirmed that forgetting the std::move here is
Avi (use Gerrit)
2016/12/29 15:18:02
Acknowledged.
| |
| 900 } else { | 904 } else { |
| 901 *result = false; | 905 *result = false; |
| 902 *capabilities = gpu::Capabilities(); | 906 *capabilities = gpu::Capabilities(); |
| 903 } | 907 } |
| 904 } | 908 } |
| 905 | 909 |
| 906 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer( | 910 std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer( |
| 907 const GPUCreateCommandBufferConfig& init_params, | 911 const GPUCreateCommandBufferConfig& init_params, |
| 908 int32_t route_id, | 912 int32_t route_id, |
| 909 std::unique_ptr<base::SharedMemory> shared_state_shm) { | 913 std::unique_ptr<base::SharedMemory> shared_state_shm) { |
| 910 if (init_params.surface_handle != kNullSurfaceHandle && | 914 if (init_params.surface_handle != kNullSurfaceHandle && |
| 911 !allow_view_command_buffers_) { | 915 !allow_view_command_buffers_) { |
| 912 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a " | 916 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a " |
| 913 "view context on a non-priviledged channel"; | 917 "view context on a non-priviledged channel"; |
| 914 return nullptr; | 918 return nullptr; |
| 915 } | 919 } |
| 916 | 920 |
| 917 int32_t share_group_id = init_params.share_group_id; | 921 int32_t share_group_id = init_params.share_group_id; |
| 918 GpuCommandBufferStub* share_group = stubs_.get(share_group_id); | 922 GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id); |
| 919 | 923 |
| 920 if (!share_group && share_group_id != MSG_ROUTING_NONE) { | 924 if (!share_group && share_group_id != MSG_ROUTING_NONE) { |
| 921 DLOG(ERROR) | 925 DLOG(ERROR) |
| 922 << "GpuChannel::CreateCommandBuffer(): invalid share group id"; | 926 << "GpuChannel::CreateCommandBuffer(): invalid share group id"; |
| 923 return nullptr; | 927 return nullptr; |
| 924 } | 928 } |
| 925 | 929 |
| 926 int32_t stream_id = init_params.stream_id; | 930 int32_t stream_id = init_params.stream_id; |
| 927 if (share_group && stream_id != share_group->stream_id()) { | 931 if (share_group && stream_id != share_group->stream_id()) { |
| 928 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not " | 932 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): stream id does not " |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 970 return nullptr; | 974 return nullptr; |
| 971 } | 975 } |
| 972 | 976 |
| 973 return stub; | 977 return stub; |
| 974 } | 978 } |
| 975 | 979 |
| 976 void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) { | 980 void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) { |
| 977 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", | 981 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", |
| 978 "route_id", route_id); | 982 "route_id", route_id); |
| 979 | 983 |
| 980 std::unique_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id); | 984 std::unique_ptr<GpuCommandBufferStub> stub; |
| 985 auto it = stubs_.find(route_id); | |
| 986 if (it != stubs_.end()) { | |
| 987 stub = std::move(it->second); | |
| 988 stubs_.erase(it); | |
| 989 } | |
| 981 // In case the renderer is currently blocked waiting for a sync reply from the | 990 // In case the renderer is currently blocked waiting for a sync reply from the |
| 982 // stub, we need to make sure to reschedule the correct stream here. | 991 // stub, we need to make sure to reschedule the correct stream here. |
| 983 if (stub && !stub->IsScheduled()) { | 992 if (stub && !stub->IsScheduled()) { |
| 984 // This stub won't get a chance to reschedule the stream so do that now. | 993 // This stub won't get a chance to reschedule the stream so do that now. |
| 985 OnStreamRescheduled(stub->stream_id(), true); | 994 OnStreamRescheduled(stub->stream_id(), true); |
| 986 } | 995 } |
| 987 | 996 |
| 988 RemoveRoute(route_id); | 997 RemoveRoute(route_id); |
| 989 } | 998 } |
| 990 | 999 |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1057 | 1066 |
| 1058 return manager->gpu_memory_buffer_factory() | 1067 return manager->gpu_memory_buffer_factory() |
| 1059 ->AsImageFactory() | 1068 ->AsImageFactory() |
| 1060 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, | 1069 ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat, |
| 1061 client_id_, surface_handle); | 1070 client_id_, surface_handle); |
| 1062 } | 1071 } |
| 1063 } | 1072 } |
| 1064 } | 1073 } |
| 1065 | 1074 |
| 1066 } // namespace gpu | 1075 } // namespace gpu |
| OLD | NEW |