Chromium Code Reviews

Unified Diff: gpu/ipc/service/gpu_channel.h

Issue 2440093003: WIP GPU scheduler + delayed activation / tile draw
Patch Set: SignalSyncToken -> IsFenceSyncReleased Created 4 years ago
Index: gpu/ipc/service/gpu_channel.h
diff --git a/gpu/ipc/service/gpu_channel.h b/gpu/ipc/service/gpu_channel.h
index 47c1ba63604df7fe0b6ed6da742fc4575d9acda9..d526e15c32ca2af8624a81dfcdce0f7768da0318 100644
--- a/gpu/ipc/service/gpu_channel.h
+++ b/gpu/ipc/service/gpu_channel.h
@@ -8,8 +8,11 @@
#include <stddef.h>
#include <stdint.h>
+#include <deque>
#include <memory>
#include <string>
+#include <unordered_map>
+#include <vector>
#include "base/containers/hash_tables.h"
#include "base/containers/scoped_ptr_hash_map.h"
@@ -23,6 +26,7 @@
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_stream_constants.h"
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
+#include "gpu/ipc/service/gpu_command_stream.h"
#include "gpu/ipc/service/gpu_memory_manager.h"
#include "ipc/ipc_sync_channel.h"
#include "ipc/message_router.h"
@@ -50,23 +54,25 @@ class GpuChannelManager;
class GpuChannelMessageFilter;
class GpuChannelMessageQueue;
class GpuWatchdogThread;
+class GpuScheduler;
// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
-class GPU_EXPORT GpuChannel
- : public IPC::Listener,
- public IPC::Sender {
+class GPU_EXPORT GpuChannel : public IPC::Listener,
+ public IPC::Sender,
+ public base::SupportsWeakPtr<GpuChannel> {
public:
// Takes ownership of the renderer process handle.
GpuChannel(GpuChannelManager* gpu_channel_manager,
SyncPointManager* sync_point_manager,
GpuWatchdogThread* watchdog,
- gl::GLShareGroup* share_group,
- gles2::MailboxManager* mailbox_manager,
- PreemptionFlag* preempting_flag,
- PreemptionFlag* preempted_flag,
- base::SingleThreadTaskRunner* task_runner,
- base::SingleThreadTaskRunner* io_task_runner,
+ GpuScheduler* scheduler,
+ scoped_refptr<gl::GLShareGroup> share_group,
+ scoped_refptr<gles2::MailboxManager> mailbox_manager,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preemption_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
int32_t client_id,
uint64_t client_tracing_id,
bool allow_view_command_buffers,
@@ -96,22 +102,18 @@ class GPU_EXPORT GpuChannel
return task_runner_;
}
- const scoped_refptr<PreemptionFlag>& preempted_flag() const {
- return preempted_flag_;
- }
-
virtual base::ProcessId GetClientPID() const;
int client_id() const { return client_id_; }
uint64_t client_tracing_id() const { return client_tracing_id_; }
- base::WeakPtr<GpuChannel> AsWeakPtr();
-
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner() const {
return io_task_runner_;
}
+ base::Callback<bool(void)> GetPreemptionCallback() const;
+
// IPC::Listener implementation:
bool OnMessageReceived(const IPC::Message& msg) override;
void OnChannelConnected(int32_t peer_pid) override;
@@ -120,7 +122,8 @@ class GPU_EXPORT GpuChannel
// IPC::Sender implementation:
bool Send(IPC::Message* msg) override;
- void OnStreamRescheduled(int32_t stream_id, bool scheduled);
+ void ScheduleCommandBuffer(GpuCommandBufferStub* stub);
+ void DescheduleCommandBuffer(GpuCommandBufferStub* stub);
gl::GLShareGroup* share_group() const { return share_group_.get(); }
@@ -159,11 +162,13 @@ class GPU_EXPORT GpuChannel
uint32_t GetUnprocessedOrderNum() const;
// Returns the shared sync point global order data for the stream.
- scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(
- int32_t stream_id);
+ scoped_refptr<SyncPointOrderData> GetSyncPointOrderData(int32_t stream_id);
- void PostHandleOutOfOrderMessage(const IPC::Message& message);
- void PostHandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue);
+ void PostHandleOutOfOrderMessage(const IPC::Message& msg);
+
+ void PostHandleMessageOnStream(scoped_refptr<GpuChannelMessageQueue> stream);
+
+ void HandleMessageOnStream(scoped_refptr<GpuChannelMessageQueue> stream);
// Synchronously handle the message to make testing convenient.
void HandleMessageForTesting(const IPC::Message& msg);
@@ -177,33 +182,30 @@ class GPU_EXPORT GpuChannel
scoped_refptr<GpuChannelMessageFilter> filter_;
// Map of routing id to command buffer stub.
- base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
+ std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
+
+ std::unordered_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> streams_;
private:
friend class TestGpuChannel;
bool OnControlMessageReceived(const IPC::Message& msg);
- void HandleMessage(const scoped_refptr<GpuChannelMessageQueue>& queue);
+ void HandleMessageHelper(const IPC::Message& msg);
// Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
// processed as soon as possible because the client is blocked until they
// are completed.
void HandleOutOfOrderMessage(const IPC::Message& msg);
- void HandleMessageHelper(const IPC::Message& msg);
-
scoped_refptr<GpuChannelMessageQueue> CreateStream(
int32_t stream_id,
GpuStreamPriority stream_priority);
scoped_refptr<GpuChannelMessageQueue> LookupStream(int32_t stream_id);
- void DestroyStreamIfNecessary(
- const scoped_refptr<GpuChannelMessageQueue>& queue);
-
- void AddRouteToStream(int32_t route_id, int32_t stream_id);
- void RemoveRouteFromStream(int32_t route_id);
+ scoped_refptr<GpuChannelMessageQueue> LookupStreamByCommandBufferId(
+ CommandBufferId command_buffer_id);
// Message handlers for control messages.
void OnCreateCommandBuffer(const GPUCreateCommandBufferConfig& init_params,
@@ -215,7 +217,7 @@ class GPU_EXPORT GpuChannel
void OnGetDriverBugWorkArounds(
std::vector<std::string>* gpu_driver_bug_workarounds);
- std::unique_ptr<GpuCommandBufferStub> CreateCommandBuffer(
+ GpuCommandBufferStub* CreateCommandBuffer(
const GPUCreateCommandBufferConfig& init_params,
int32_t route_id,
std::unique_ptr<base::SharedMemory> shared_state_shm);
@@ -229,6 +231,8 @@ class GPU_EXPORT GpuChannel
// message loop.
SyncPointManager* const sync_point_manager_;
+ GpuScheduler* scheduler_;
+
std::unique_ptr<IPC::SyncChannel> channel_;
IPC::Listener* unhandled_message_listener_;
@@ -236,13 +240,13 @@ class GPU_EXPORT GpuChannel
// Used to implement message routing functionality to CommandBuffer objects
IPC::MessageRouter router_;
- // Whether the processing of IPCs on this channel is stalled and we should
- // preempt other GpuChannels.
+  // This flag, if provided, should be set when processing of IPC messages on
+  // this channel is stalled and we should preempt other channels.
scoped_refptr<PreemptionFlag> preempting_flag_;
- // If non-NULL, all stubs on this channel should stop processing GL
- // commands (via their CommandExecutor) when preempted_flag_->IsSet()
- scoped_refptr<PreemptionFlag> preempted_flag_;
+  // This flag, if provided, tells command buffers to stop processing commands
+  // and yield when it is set.
+ scoped_refptr<PreemptionFlag> preemption_flag_;
// The id of the client who is on the other side of the channel.
const int32_t client_id_;
@@ -262,15 +266,6 @@ class GPU_EXPORT GpuChannel
GpuWatchdogThread* const watchdog_;
- // Map of stream id to appropriate message queue.
- base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> streams_;
-
- // Multimap of stream id to route ids.
- base::hash_map<int32_t, int> streams_to_num_routes_;
-
- // Map of route id to stream id;
- base::hash_map<int32_t, int32_t> routes_to_streams_;
-
// Can view command buffers be created on this channel.
const bool allow_view_command_buffers_;
@@ -279,11 +274,6 @@ class GPU_EXPORT GpuChannel
base::ProcessId peer_pid_;
- // Member variables should appear before the WeakPtrFactory, to ensure
- // that any WeakPtrs to Controller are invalidated before its members
- // variable's destructors are executed, rendering them invalid.
- base::WeakPtrFactory<GpuChannel> weak_factory_;
-
DISALLOW_COPY_AND_ASSIGN(GpuChannel);
};
@@ -300,7 +290,8 @@ class GPU_EXPORT GpuChannel
// - it generates mailbox names for clients of the GPU process on the IO thread.
class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
public:
- GpuChannelMessageFilter();
+ static scoped_refptr<GpuChannelMessageFilter> Create(GpuChannel* gpu_channel);
+ void Disable();
// IPC::MessageFilter implementation.
void OnFilterAdded(IPC::Channel* channel) override;
@@ -310,30 +301,31 @@ class GPU_EXPORT GpuChannelMessageFilter : public IPC::MessageFilter {
void OnChannelClosing() override;
bool OnMessageReceived(const IPC::Message& message) override;
+ void AddRoute(int32_t route_id, scoped_refptr<GpuChannelMessageQueue> stream);
+ scoped_refptr<GpuChannelMessageQueue> RemoveRoute(int32_t route_id);
+
void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
- void AddRoute(int32_t route_id,
- const scoped_refptr<GpuChannelMessageQueue>& queue);
- void RemoveRoute(int32_t route_id);
-
bool Send(IPC::Message* message);
protected:
+ GpuChannelMessageFilter(GpuChannel* gpu_channel);
~GpuChannelMessageFilter() override;
private:
- scoped_refptr<GpuChannelMessageQueue> LookupStreamByRoute(int32_t route_id);
+ scoped_refptr<GpuChannelMessageQueue> LookupRoute(int32_t route_id);
bool MessageErrorHandler(const IPC::Message& message, const char* error_msg);
- // Map of route id to message queue.
- base::hash_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> routes_;
- base::Lock routes_lock_; // Protects |routes_|.
-
- IPC::Channel* channel_;
- base::ProcessId peer_pid_;
+ bool enabled_ = true;
+ GpuChannel* gpu_channel_ = nullptr;
+ std::unordered_map<int32_t, scoped_refptr<GpuChannelMessageQueue>> routes_;
std::vector<scoped_refptr<IPC::MessageFilter>> channel_filters_;
+ base::Lock lock_; // Protects the above variables.
+
+ IPC::Channel* channel_ = nullptr;
+ base::ProcessId peer_pid_ = base::kNullProcessId;
DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
};
@@ -353,29 +345,38 @@ struct GpuChannelMessage {
};
class GpuChannelMessageQueue
- : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
+ : public GpuCommandStream,
+ public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
public:
static scoped_refptr<GpuChannelMessageQueue> Create(
int32_t stream_id,
GpuStreamPriority stream_priority,
GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ GpuScheduler* scheduler,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preemption_flag,
SyncPointManager* sync_point_manager);
- void Disable();
- void DisableIO();
+ void Destroy();
+ void DestroyIO();
+
+ void OnRouteAdded();
+ void OnRouteRemoved();
+ size_t NumRoutes() const;
+
+ // GpuCommandStream implementation.
+ void Run() override;
int32_t stream_id() const { return stream_id_; }
- GpuStreamPriority stream_priority() const { return stream_priority_; }
+ GpuStreamPriority stream_priority() const { return priority_; }
bool IsScheduled() const;
void OnRescheduled(bool scheduled);
- bool HasQueuedMessages() const;
-
- base::TimeTicks GetNextMessageTimeTick() const;
+ void Schedule();
+ void Deschedule();
scoped_refptr<SyncPointOrderData> GetSyncPointOrderData();
@@ -385,6 +386,10 @@ class GpuChannelMessageQueue
// Returns the global order number for the last unprocessed IPC message.
uint32_t GetProcessedOrderNum() const;
+ bool HasMessages() const;
+
+ void PushBackMessage(const IPC::Message& message);
+
// Should be called before a message begins to be processed. Returns false if
// there are no messages to process.
const GpuChannelMessage* BeginMessageProcessing();
@@ -394,8 +399,6 @@ class GpuChannelMessageQueue
// there are more messages to process.
void FinishMessageProcessing();
- bool PushBackMessage(const IPC::Message& message);
-
private:
enum PreemptionState {
// Either there's no other channel to preempt, there are no messages
@@ -419,12 +422,15 @@ class GpuChannelMessageQueue
int32_t stream_id,
GpuStreamPriority stream_priority,
GpuChannel* channel,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
- const scoped_refptr<PreemptionFlag>& preempting_flag,
- const scoped_refptr<PreemptionFlag>& preempted_flag,
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
+ GpuScheduler* scheduler,
+ scoped_refptr<PreemptionFlag> preempting_flag,
+ scoped_refptr<PreemptionFlag> preemption_flag,
SyncPointManager* sync_point_manager);
- ~GpuChannelMessageQueue();
+ ~GpuChannelMessageQueue() override;
+ void PostUpdatePreemptionState();
void UpdatePreemptionState();
void UpdatePreemptionStateHelper();
@@ -443,13 +449,13 @@ class GpuChannelMessageQueue
bool ShouldTransitionToIdle() const;
const int32_t stream_id_;
- const GpuStreamPriority stream_priority_;
+ const GpuStreamPriority priority_;
+ size_t num_routes_;
// These can be accessed from both IO and main threads and are protected by
// |channel_lock_|.
- bool enabled_;
bool scheduled_;
- GpuChannel* const channel_;
+ GpuChannel* channel_;
std::deque<std::unique_ptr<GpuChannelMessage>> channel_messages_;
mutable base::Lock channel_lock_;
@@ -460,16 +466,18 @@ class GpuChannelMessageQueue
// Maximum amount of time that we can spend in PREEMPTING.
// It is reset when we transition to IDLE.
base::TimeDelta max_preemption_time_;
- // This timer is used and runs tasks on the IO thread.
+ // This timer is used to run tasks on the IO thread.
std::unique_ptr<base::OneShotTimer> timer_;
base::ThreadChecker io_thread_checker_;
// Keeps track of sync point related state such as message order numbers.
scoped_refptr<SyncPointOrderData> sync_point_order_data_;
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+ GpuScheduler* const scheduler_;
scoped_refptr<PreemptionFlag> preempting_flag_;
- scoped_refptr<PreemptionFlag> preempted_flag_;
+ scoped_refptr<PreemptionFlag> preemption_flag_;
SyncPointManager* const sync_point_manager_;
DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
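
The core of this patch is the split between streams and the scheduler: GpuChannelMessageQueue now derives from GpuCommandStream, exposes a Run() hook, and is scheduled and descheduled by a GpuScheduler instead of posting its own tasks. The following is a minimal, self-contained C++ sketch of that shape; the Stream, Scheduler, and MessageQueue classes and the round-robin policy are illustrative assumptions, not the real Chromium types.

// Toy model of the stream/scheduler split in this patch. All names below are
// hypothetical stand-ins, not the real GpuCommandStream / GpuScheduler APIs.
#include <cstdio>
#include <deque>
#include <memory>
#include <string>

class Stream {
 public:
  virtual ~Stream() = default;
  // Runs one unit of work; returns true if more work remains.
  virtual bool Run() = 0;
};

class Scheduler {
 public:
  void Schedule(std::shared_ptr<Stream> stream) {
    runnable_.push_back(std::move(stream));
  }

  // Round-robin: run each scheduled stream once, re-queue it while it still
  // has work left.
  void RunUntilIdle() {
    while (!runnable_.empty()) {
      std::shared_ptr<Stream> stream = std::move(runnable_.front());
      runnable_.pop_front();
      if (stream->Run())
        runnable_.push_back(std::move(stream));
    }
  }

 private:
  std::deque<std::shared_ptr<Stream>> runnable_;
};

// Stand-in for GpuChannelMessageQueue: drains one queued "message" per Run().
class MessageQueue : public Stream {
 public:
  MessageQueue(std::string name, int num_messages)
      : name_(std::move(name)), remaining_(num_messages) {}

  bool Run() override {
    std::printf("%s: handling a message (%d queued)\n", name_.c_str(),
                remaining_);
    return --remaining_ > 0;
  }

 private:
  std::string name_;
  int remaining_;
};

int main() {
  Scheduler scheduler;
  scheduler.Schedule(std::make_shared<MessageQueue>("stream-1", 2));
  scheduler.Schedule(std::make_shared<MessageQueue>("stream-2", 3));
  scheduler.RunUntilIdle();  // interleaves the two streams round-robin
  return 0;
}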
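
The renamed flags describe a two-sided preemption handshake: a queue sets its preempting_flag_ while IPC processing on its channel is stalled, and command buffers on lower-priority channels consult their preemption_flag_ (the same underlying flag object) to yield. Below is a minimal sketch of that handshake, assuming a simple atomic-boolean Flag in place of the real gpu::PreemptionFlag; HighPriorityQueue and LowPriorityExecutor are hypothetical stand-ins.

// Minimal sketch of the two-flag preemption handshake implied by the renamed
// members above. Class names are stand-ins, not the real Chromium classes.
#include <atomic>
#include <cstdio>
#include <memory>

// Stand-in for gpu::PreemptionFlag: a thread-safe boolean.
class Flag {
 public:
  void Set() { set_.store(true, std::memory_order_release); }
  void Reset() { set_.store(false, std::memory_order_release); }
  bool IsSet() const { return set_.load(std::memory_order_acquire); }

 private:
  std::atomic<bool> set_{false};
};

// High-priority side: sets the shared flag while its IPC messages are stalled
// (roughly the PREEMPTING state in the queue's state machine).
class HighPriorityQueue {
 public:
  explicit HighPriorityQueue(std::shared_ptr<Flag> preempting_flag)
      : preempting_flag_(std::move(preempting_flag)) {}

  void OnMessagesStalled() { preempting_flag_->Set(); }
  void OnMessagesDrained() { preempting_flag_->Reset(); }

 private:
  std::shared_ptr<Flag> preempting_flag_;
};

// Low-priority side: checks the same flag between commands and yields when it
// is set, so the stalled channel can run first.
class LowPriorityExecutor {
 public:
  explicit LowPriorityExecutor(std::shared_ptr<Flag> preemption_flag)
      : preemption_flag_(std::move(preemption_flag)) {}

  // Returns true if execution yielded early because of preemption.
  bool ProcessCommands(int num_commands) {
    for (int i = 0; i < num_commands; ++i) {
      if (preemption_flag_->IsSet())
        return true;
      std::printf("executing command %d\n", i);
    }
    return false;
  }

 private:
  std::shared_ptr<Flag> preemption_flag_;
};

int main() {
  // One flag object: the preempting_flag_ of the high-priority channel and the
  // preemption_flag_ of the low-priority channel.
  auto flag = std::make_shared<Flag>();
  HighPriorityQueue ui_channel(flag);
  LowPriorityExecutor worker_channel(flag);

  worker_channel.ProcessCommands(2);                 // runs both commands
  ui_channel.OnMessagesStalled();
  bool yielded = worker_channel.ProcessCommands(2);  // yields immediately
  std::printf("yielded: %s\n", yielded ? "true" : "false");
  ui_channel.OnMessagesDrained();
  return 0;
}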