Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Unified Diff: gpu/command_buffer/service/sync_point_manager.cc

Issue 849103002: Share SyncPointManager implementation in in-process cmd buffer (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: DCHECKs Created 5 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « gpu/command_buffer/service/sync_point_manager.h ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: gpu/command_buffer/service/sync_point_manager.cc
diff --git a/gpu/command_buffer/service/sync_point_manager.cc b/gpu/command_buffer/service/sync_point_manager.cc
index cd8c49088b4e79405f3abe814db9b7da70525a9b..9a5137d2cb13dd7e6e6aa73e27cbb782462083e5 100644
--- a/gpu/command_buffer/service/sync_point_manager.cc
+++ b/gpu/command_buffer/service/sync_point_manager.cc
@@ -13,17 +13,17 @@ namespace gpu {
static const int kMaxSyncBase = INT_MAX;
-SyncPointManager::SyncPointManager()
- : next_sync_point_(base::RandInt(1, kMaxSyncBase)) {
+ThreadedSyncPointManager::ThreadedSyncPointManager()
+ : next_sync_point_(base::RandInt(1, kMaxSyncBase)), cond_var_(&lock_) {
// To reduce the risk that a sync point created in a previous GPU process
// will be in flight in the next GPU process, randomize the starting sync
// point number. http://crbug.com/373452
}
-SyncPointManager::~SyncPointManager() {
+ThreadedSyncPointManager::~ThreadedSyncPointManager() {
}
-uint32 SyncPointManager::GenerateSyncPoint() {
+uint32 ThreadedSyncPointManager::GenerateSyncPoint() {
base::AutoLock lock(lock_);
uint32 sync_point = next_sync_point_++;
// When an integer overflow occurs, don't return 0.
@@ -40,27 +40,29 @@ uint32 SyncPointManager::GenerateSyncPoint() {
return sync_point;
}
-void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
- DCHECK(thread_checker_.CalledOnValidThread());
+void ThreadedSyncPointManager::RetireSyncPoint(uint32 sync_point) {
ClosureList list;
+ base::AutoLock lock(lock_);
+ SyncPointMap::iterator it = sync_point_map_.find(sync_point);
+ if (it == sync_point_map_.end()) {
+ LOG(ERROR) << "Attempted to retire sync point that"
+ " didn't exist or was already retired.";
+ return;
+ }
+ list.swap(it->second);
+ sync_point_map_.erase(it);
+
{
- base::AutoLock lock(lock_);
- SyncPointMap::iterator it = sync_point_map_.find(sync_point);
- if (it == sync_point_map_.end()) {
- LOG(ERROR) << "Attempted to retire sync point that"
- " didn't exist or was already retired.";
- return;
- }
- list.swap(it->second);
- sync_point_map_.erase(it);
+ base::AutoUnlock unlock(lock_);
+ for (ClosureList::iterator i = list.begin(); i != list.end(); ++i)
+ i->Run();
}
- for (ClosureList::iterator i = list.begin(); i != list.end(); ++i)
- i->Run();
+
+ cond_var_.Broadcast();
no sievers 2015/01/27 21:26:39 Can we somehow avoid dealing with the cond. var/lock [… comment truncated in page extraction]
no sievers 2015/01/27 21:26:39 Also, it'd be slightly better to call Broadcast() [… comment truncated in page extraction]
}
-void SyncPointManager::AddSyncPointCallback(uint32 sync_point,
+void ThreadedSyncPointManager::AddSyncPointCallback(uint32 sync_point,
const base::Closure& callback) {
- DCHECK(thread_checker_.CalledOnValidThread());
{
base::AutoLock lock(lock_);
SyncPointMap::iterator it = sync_point_map_.find(sync_point);
@@ -72,13 +74,50 @@ void SyncPointManager::AddSyncPointCallback(uint32 sync_point,
callback.Run();
}
-bool SyncPointManager::IsSyncPointRetired(uint32 sync_point) {
- DCHECK(thread_checker_.CalledOnValidThread());
+bool ThreadedSyncPointManager::IsSyncPointRetired(uint32 sync_point) {
{
base::AutoLock lock(lock_);
- SyncPointMap::iterator it = sync_point_map_.find(sync_point);
- return it == sync_point_map_.end();
+ return IsSyncPointRetiredLocked(sync_point);
}
}
+void ThreadedSyncPointManager::WaitSyncPoint(uint32 sync_point) {
+ base::AutoLock lock(lock_);
+ while (!IsSyncPointRetiredLocked(sync_point)) {
+ cond_var_.Wait();
+ }
+}
+
+bool ThreadedSyncPointManager::IsSyncPointRetiredLocked(uint32 sync_point) {
+ lock_.AssertAcquired();
+ SyncPointMap::iterator it = sync_point_map_.find(sync_point);
+ return it == sync_point_map_.end();
+}
+
+SyncPointManager::SyncPointManager() {
+}
+
+SyncPointManager::~SyncPointManager() {
+}
+
+uint32 SyncPointManager::GenerateSyncPoint() {
+ return threaded_sync_point_manager_.GenerateSyncPoint();
+}
+
+void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ threaded_sync_point_manager_.RetireSyncPoint(sync_point);
no sievers 2015/01/27 21:26:39 Since this only adds three DCHECKs(), can you either [… comment truncated in page extraction]
+}
+
+void SyncPointManager::AddSyncPointCallback(uint32 sync_point,
+ const base::Closure& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ threaded_sync_point_manager_.AddSyncPointCallback(sync_point, callback);
+}
+
+bool SyncPointManager::IsSyncPointRetired(uint32 sync_point) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return threaded_sync_point_manager_.IsSyncPointRetired(sync_point);
+}
+
} // namespace gpu
« no previous file with comments | « gpu/command_buffer/service/sync_point_manager.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698