Chromium Code Reviews

Side by Side Diff: base/threading/sequenced_worker_pool.cc

Issue 2574403002: Add a TaskPriority cap to the SWP redirection to experiment with no redirections at USER_BLOCKING. (Closed)
Patch Set: Created 4 years ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/threading/sequenced_worker_pool.h"

#include <stdint.h>

#include <list>
#include <map>
(...skipping 57 matching lines...)
  POST_TASK_DISABLED,
  USE_WORKER_POOL,
  REDIRECTED_TO_TASK_SCHEDULER,
};

// TODO(fdoray): Change the initial state to POST_TASK_DISABLED. It is initially
// USE_WORKER_POOL to avoid a revert of the CL that adds
// debug::DumpWithoutCrashing() in case of waterfall failures.
AllPoolsState g_all_pools_state = AllPoolsState::USE_WORKER_POOL;

+TaskPriority g_max_task_priority = TaskPriority::HIGHEST;
+
struct SequencedTask : public TrackingInfo {
  SequencedTask()
      : sequence_token_id(0),
        trace_id(0),
        sequence_task_number(0),
        shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {}

  explicit SequencedTask(const tracked_objects::Location& from_here)
      : base::TrackingInfo(from_here, TimeTicks()),
        sequence_token_id(0),
(...skipping 549 matching lines...)
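For orientation: the cap compares priorities by their underlying integer values, and TaskPriority::HIGHEST is the top of the scale, so the default above means "no cap". The enum in base/task_scheduler/task_traits.h looked roughly like this at the time (a paraphrased sketch for context, not part of this CL):

// Paraphrased from base/task_scheduler/task_traits.h (circa this CL); the
// increasing underlying values are what the integer comparison relies on.
enum class TaskPriority {
  LOWEST = 0,
  BACKGROUND = LOWEST,      // Lowest-priority, non-urgent work.
  USER_VISIBLE,             // Affects something the user can see.
  USER_BLOCKING,            // Blocks user interaction.
  HIGHEST = USER_BLOCKING,  // Default cap value: effectively no cap.
};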
      blocking_shutdown_thread_count_(0),
      next_sequence_task_number_(0),
      blocking_shutdown_pending_task_count_(0),
      trace_id_(0),
      shutdown_called_(false),
      max_blocking_tasks_after_shutdown_(0),
      cleanup_state_(CLEANUP_DONE),
      cleanup_idlers_(0),
      cleanup_cv_(&lock_),
      testing_observer_(observer),
-     task_priority_(task_priority) {
+     task_priority_(static_cast<int>(task_priority) <=
+                            static_cast<int>(g_max_task_priority)
+                        ? task_priority
+                        : g_max_task_priority) {

dcheng 2016/12/15 21:20:49  Yay, C++.

  DCHECK_GT(max_threads_, 1U);
}

SequencedWorkerPool::Inner::~Inner() {
  // You must call Shutdown() before destroying the pool.
  DCHECK(shutdown_called_);

  // Need to explicitly join with the threads before they're destroyed or else
  // they will be running when our object is half torn down.
  for (ThreadMap::iterator it = threads_.begin(); it != threads_.end(); ++it)
(...skipping 757 matching lines...)
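The initializer dcheng comments on clamps the pool's requested priority to the process-wide cap; the static_casts spell out that the comparison happens on the enums' underlying integers. A minimal sketch of the same clamp as a standalone helper (hypothetical name CapPriority; illustration only, not what the CL compiles):

#include <algorithm>

#include "base/task_scheduler/task_traits.h"  // for base::TaskPriority

// Hypothetical helper, equivalent to the initializer above: never exceed the
// process-wide cap. std::min is usable because two values of the same scoped
// enum type compare with the built-in relational operators.
base::TaskPriority CapPriority(base::TaskPriority requested,
                               base::TaskPriority cap) {
  return std::min(requested, cap);
}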
// static
void SequencedWorkerPool::EnableForProcess() {
  // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
  // revert of the CL that adds debug::DumpWithoutCrashing() in case of
  // waterfall failures.
  // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
  g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
}

// static
-void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess() {
+void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
+    TaskPriority max_task_priority) {
  // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
  // revert of the CL that adds debug::DumpWithoutCrashing() in case of
  // waterfall failures.
  // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
  DCHECK(TaskScheduler::GetInstance());
  g_all_pools_state = AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER;
+  g_max_task_priority = max_task_priority;
}

// static
void SequencedWorkerPool::DisableForProcessForTesting() {
  g_all_pools_state = AllPoolsState::POST_TASK_DISABLED;
}

// static
bool SequencedWorkerPool::IsEnabled() {
  return g_all_pools_state != AllPoolsState::POST_TASK_DISABLED;
(...skipping 155 matching lines...)
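With the new parameter, the experiment named in the issue title could be wired up by passing a cap below USER_BLOCKING when enabling redirection. A usage sketch (the call site and chosen value are assumptions; the actual experiment configuration presumably lives in the other file touched by this CL, components/task_scheduler_util/variations/browser_variations_util.cc):

// Illustration only: enable redirection, but run redirected
// SequencedWorkerPool tasks at no more than USER_VISIBLE, so nothing reaches
// the scheduler at USER_BLOCKING. Cap value and call site are assumptions.
base::SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
    base::TaskPriority::USER_VISIBLE);

// Passing TaskPriority::HIGHEST reproduces the previous behavior (no cap).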
bool SequencedWorkerPool::IsShutdownInProgress() {
  return inner_->IsShutdownInProgress();
}

bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
    SequenceToken sequence_token) const {
  return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
}

}  // namespace base