Chromium Code Reviews

Unified Diff: src/compiler/scheduler.cc

Issue 899433005: [turbofan] Split pure nodes in the scheduler if beneficial. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: comments Created 5 years, 10 months ago
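
This patch threads a new Flags parameter through Scheduler::ComputeSchedule and gates the node-splitting logic below behind Scheduler::kSplitNodes. The Flags type is declared in src/compiler/scheduler.h and the call site lives outside this file, so the following caller sketch is an assumption for illustration only (the kNoFlags default value and the --turbo_splitting runtime flag are not shown in this diff):

  // Sketch only: the zone/graph variable names, the kNoFlags default and the
  // runtime flag are assumptions, not part of this file's diff.
  Scheduler::Flags flags = Scheduler::kNoFlags;
  if (FLAG_turbo_splitting) flags |= Scheduler::kSplitNodes;
  Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, graph, flags);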
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/scheduler.h"

 #include "src/bit-vector.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-marker.h"
 #include "src/compiler/node-properties.h"
 #include "src/zone-containers.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

 static inline void Trace(const char* msg, ...) {
   if (FLAG_trace_turbo_scheduler) {
     va_list arguments;
     va_start(arguments, msg);
     base::OS::VPrint(msg, arguments);
     va_end(arguments);
   }
 }


-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
     : zone_(zone),
       graph_(graph),
       schedule_(schedule),
+      flags_(flags),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
       schedule_queue_(zone),
       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}


-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
   Schedule* schedule = new (graph->zone())
       Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
-  Scheduler scheduler(zone, graph, schedule);
+  Scheduler scheduler(zone, graph, schedule, flags);

   scheduler.BuildCFG();
   scheduler.ComputeSpecialRPONumbering();
   scheduler.GenerateImmediateDominatorTree();

   scheduler.PrepareUses();
   scheduler.ScheduleEarly();
   scheduler.ScheduleLate();

   scheduler.SealFinalSchedule();
(...skipping 1165 matching lines...)
 }


 // -----------------------------------------------------------------------------
 // Phase 5: Schedule nodes late.


 class ScheduleLateNodeVisitor {
  public:
   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+      : scheduler_(scheduler),
+        schedule_(scheduler_->schedule_),
+        marked_(scheduler->zone_),
+        marking_queue_(scheduler->zone_) {}

   // Run the schedule late algorithm on a set of fixed root nodes.
   void Run(NodeVector* roots) {
     for (Node* const root : *roots) {
       ProcessQueue(root);
     }
   }

  private:
   void ProcessQueue(Node* root) {
     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
     for (Node* node : root->inputs()) {
       // Don't schedule coupled nodes on their own.
       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
         node = NodeProperties::GetControlInput(node);
       }

       // Test schedulability condition by looking at unscheduled use count.
       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;

       queue->push(node);
-      while (!queue->empty()) {
-        VisitNode(queue->front());
-        queue->pop();
-      }
+      do {
+        Node* const node = queue->front();
+        queue->pop();
+        VisitNode(node);
+      } while (!queue->empty());
     }
   }

   // Visits one node from the queue of schedulable nodes and determines its
   // schedule late position. Also hoists nodes out of loops to find a more
   // optimal scheduling position.
   void VisitNode(Node* node) {
     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

     // Don't schedule nodes that are already scheduled.
(...skipping 10 matching lines...)
     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
     Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
           node->id(), node->op()->mnemonic(), block->id().ToInt(),
           block->loop_depth(), min_block->id().ToInt());

     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
     // into enclosing loop pre-headers until they would precede their schedule
     // early position.
     BasicBlock* hoist_block = GetPreHeader(block);
-    while (hoist_block != NULL &&
-           hoist_block->dominator_depth() >= min_block->dominator_depth()) {
-      Trace("  hoisting #%d:%s to block B%d\n", node->id(),
-            node->op()->mnemonic(), hoist_block->id().ToInt());
-      DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
-      block = hoist_block;
-      hoist_block = GetPreHeader(hoist_block);
+    if (hoist_block &&
+        hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+      do {
+        Trace("  hoisting #%d:%s to block B%d\n", node->id(),
+              node->op()->mnemonic(), hoist_block->id().ToInt());
+        DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+        block = hoist_block;
+        hoist_block = GetPreHeader(hoist_block);
+      } while (hoist_block &&
+               hoist_block->dominator_depth() >= min_block->dominator_depth());
+    } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
+      // Split the {node} if beneficial and return the new {block} for it.
+      block = SplitNode(block, node);
     }

     // Schedule the node or a floating control structure.
     if (NodeProperties::IsControl(node)) {
       ScheduleFloatingControl(block, node);
     } else {
       ScheduleNode(block, node);
     }
   }

+  // Mark {block} and push its non-marked predecessors on the marking queue.
+  void MarkBlock(BasicBlock* block) {
+    DCHECK_LT(block->id().ToSize(), marked_.size());
+    marked_[block->id().ToSize()] = true;
+    for (BasicBlock* pred_block : block->predecessors()) {
+      DCHECK_LT(pred_block->id().ToSize(), marked_.size());
+      if (marked_[pred_block->id().ToSize()]) continue;
+      marking_queue_.push_back(pred_block);
+    }
+  }
+
+  BasicBlock* SplitNode(BasicBlock* block, Node* node) {
+    // For now, we limit splitting to pure nodes.
+    if (!node->op()->HasProperty(Operator::kPure)) return block;
+
+    // The {block} is the common dominator of all uses of {node}, so we cannot
+    // split anything unless the {block} has at least two successors.
+    DCHECK_EQ(block, GetCommonDominatorOfUses(node));
+    if (block->SuccessorCount() < 2) return block;
+
+    // Clear marking bits.
+    DCHECK(marking_queue_.empty());
+    std::fill(marked_.begin(), marked_.end(), false);
+    marked_.resize(schedule_->BasicBlockCount() + 1, false);
+
+    // Check if the {node} has uses in {block}.
+    for (Edge edge : node->use_edges()) {
+      BasicBlock* use_block = GetBlockForUse(edge);
+      if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
+      if (use_block == block) {
+        Trace("  not splitting #%d:%s, it is used in B%d\n", node->id(),
+              node->op()->mnemonic(), block->id().ToInt());
+        marking_queue_.clear();
+        return block;
+      }
+      MarkBlock(use_block);
+    }
+
+    // Compute transitive marking closure; a block is marked if all its
+    // successors are marked.
+    do {
+      BasicBlock* top_block = marking_queue_.front();
+      marking_queue_.pop_front();
+      if (marked_[top_block->id().ToSize()]) continue;
+      bool marked = true;
+      for (BasicBlock* successor : top_block->successors()) {
+        if (!marked_[successor->id().ToSize()]) {
+          marked = false;
+          break;
+        }
+      }
+      if (marked) MarkBlock(top_block);
+    } while (!marking_queue_.empty());
+
+    // If the (common dominator) {block} is marked, we know that all paths from
+    // {block} to the end contain at least one use of {node}, and hence there's
+    // no point in splitting the {node} in this case.
+    if (marked_[block->id().ToSize()]) {
+      Trace("  not splitting #%d:%s, its common dominator B%d is perfect\n",
+            node->id(), node->op()->mnemonic(), block->id().ToInt());
+      return block;
+    }
+
+    // Split {node} for uses according to the previously computed marking
+    // closure. Every marking partition has a unique dominator, which gets a
+    // copy of the {node} with the exception of the first partition, which gets
+    // the {node} itself.
+    ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
+    for (Edge edge : node->use_edges()) {
+      BasicBlock* use_block = GetBlockForUse(edge);
+      if (use_block == nullptr) continue;
+      while (marked_[use_block->dominator()->id().ToSize()]) {
+        use_block = use_block->dominator();
+      }
+      auto& use_node = dominators[use_block];
+      if (use_node == nullptr) {
+        if (dominators.size() == 1u) {
+          // Place the {node} at {use_block}.
+          block = use_block;
+          use_node = node;
+          Trace("  pushing #%d:%s down to B%d\n", node->id(),
+                node->op()->mnemonic(), block->id().ToInt());
+        } else {
+          // Place a copy of {node} at {use_block}.
+          use_node = CloneNode(node);
+          Trace("  cloning #%d:%s for B%d\n", use_node->id(),
+                use_node->op()->mnemonic(), use_block->id().ToInt());
+          scheduler_->schedule_queue_.push(use_node);
+        }
+      }
+      edge.UpdateTo(use_node);
+    }
+    return block;
+  }
+
1407
1302 BasicBlock* GetPreHeader(BasicBlock* block) { 1408 BasicBlock* GetPreHeader(BasicBlock* block) {
1303 if (block->IsLoopHeader()) { 1409 if (block->IsLoopHeader()) {
1304 return block->dominator(); 1410 return block->dominator();
1305 } else if (block->loop_header() != NULL) { 1411 } else if (block->loop_header() != NULL) {
1306 return block->loop_header()->dominator(); 1412 return block->loop_header()->dominator();
1307 } else { 1413 } else {
1308 return NULL; 1414 return NULL;
1309 } 1415 }
1310 } 1416 }
1311 1417
   BasicBlock* GetCommonDominatorOfUses(Node* node) {
-    BasicBlock* block = NULL;
+    BasicBlock* block = nullptr;
     for (Edge edge : node->use_edges()) {
       BasicBlock* use_block = GetBlockForUse(edge);
       block = block == NULL ? use_block : use_block == NULL
                                               ? block
                                               : BasicBlock::GetCommonDominator(
                                                     block, use_block);
     }
     return block;
   }

(...skipping 30 matching lines...)
   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
     scheduler_->FuseFloatingControl(block, node);
   }

   void ScheduleNode(BasicBlock* block, Node* node) {
     schedule_->PlanNode(block, node);
     scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
   }

+  Node* CloneNode(Node* node) {
+    int const input_count = node->InputCount();
+    Node** const inputs = scheduler_->zone_->NewArray<Node*>(input_count);
+    for (int index = 0; index < input_count; ++index) {
+      Node* const input = node->InputAt(index);
+      scheduler_->IncrementUnscheduledUseCount(input, index, node);
+      inputs[index] = input;
+    }
+    Node* copy = scheduler_->graph_->NewNode(node->op(), input_count, inputs);
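+    // Register the copy in the scheduler's side table and let it inherit the
+    // scheduler data (e.g. the minimum block) already computed for {node}.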
+    scheduler_->node_data_.resize(copy->id() + 1,
+                                  scheduler_->DefaultSchedulerData());
+    scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
+    return copy;
+  }
+
   Scheduler* scheduler_;
   Schedule* schedule_;
+  BoolVector marked_;
+  ZoneDeque<BasicBlock*> marking_queue_;
 };


 void Scheduler::ScheduleLate() {
   Trace("--- SCHEDULE LATE ------------------------------------------\n");
   if (FLAG_trace_turbo_scheduler) {
     Trace("roots: ");
     for (Node* node : schedule_root_nodes_) {
       Trace("#%d:%s ", node->id(), node->op()->mnemonic());
     }
(...skipping 90 matching lines...)
   for (Node* const node : *nodes) {
     schedule_->SetBlockForNode(to, node);
     scheduled_nodes_[to->id().ToSize()].push_back(node);
   }
   nodes->clear();
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
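
The effect of SplitNode is easiest to see on a pure value whose uses sit on different branch arms. A hypothetical source-level illustration (the function and its names are made up for this note; only the described scheduler behaviour follows the code above):

  // Illustration only (not from this CL): a pure value with two uses whose
  // common dominator is the entry block.
  int f(int a, int b, bool c, bool d) {
    int x = a * b;        // pure node
    if (c) return x + 1;  // first use of x
    if (d) return 0;      // this path does not use x
    return x - 1;         // second use of x
  }
  // Schedule-late alone would plan a * b in the entry block, the common
  // dominator of both uses, so the multiplication also runs on the path that
  // returns 0. With Scheduler::kSplitNodes, the marking closure finds that the
  // entry block is not "perfect" (not every path from it reaches a use), so
  // the node is pushed down to the block of one use and a clone is created
  // for the other.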