Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(945)

Side by Side Diff: src/data-flow.cc

Issue 1253009: Move flow graph and helper classes to their own file. (Closed)
Patch Set: Created 10 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/data-flow.h ('k') | src/flow-graph.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 10 matching lines...) Expand all
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "data-flow.h" 30 #include "data-flow.h"
31 #include "flow-graph.h"
31 #include "scopes.h" 32 #include "scopes.h"
32 33
33 namespace v8 { 34 namespace v8 {
34 namespace internal { 35 namespace internal {
35 36
36 37
#ifdef DEBUG
// Print this bit vector as the set of contained indices, e.g. "{0,3,7}".
// Debug-only helper.
void BitVector::Print() {
  bool first = true;
  PrintF("{");
  for (int i = 0; i < length(); i++) {
    if (Contains(i)) {
      if (!first) PrintF(",");
      first = false;
      // BUG FIX: the "%d" format specifier was previously passed without a
      // matching argument (undefined behavior, garbage output); print the
      // bit index explicitly.
      PrintF("%d", i);
    }
  }
  PrintF("}");
}
#endif
51 52
52 53
53 void FlowGraph::AppendInstruction(AstNode* instruction) {
54 // Add a (non-null) AstNode to the end of the graph fragment.
55 ASSERT(instruction != NULL);
56 if (exit()->IsExitNode()) return;
57 if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
58 BlockNode::cast(exit())->AddInstruction(instruction);
59 }
60
61
62 void FlowGraph::AppendNode(Node* node) {
63 // Add a node to the end of the graph. An empty block is added to
64 // maintain edge-split form (that no join nodes or exit nodes as
65 // successors to branch nodes).
66 ASSERT(node != NULL);
67 if (exit()->IsExitNode()) return;
68 if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
69 AppendNode(new BlockNode());
70 }
71 exit()->AddSuccessor(node);
72 node->AddPredecessor(exit());
73 exit_ = node;
74 }
75
76
77 void FlowGraph::AppendGraph(FlowGraph* graph) {
78 // Add a flow graph fragment to the end of this one. An empty block is
79 // added to maintain edge-split form (that no join nodes or exit nodes as
80 // successors to branch nodes).
81 ASSERT(graph != NULL);
82 if (exit()->IsExitNode()) return;
83 Node* node = graph->entry();
84 if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
85 AppendNode(new BlockNode());
86 }
87 exit()->AddSuccessor(node);
88 node->AddPredecessor(exit());
89 exit_ = graph->exit();
90 }
91
92
93 void FlowGraph::Split(BranchNode* branch,
94 FlowGraph* left,
95 FlowGraph* right,
96 JoinNode* join) {
97 // Add the branch node, left flowgraph, join node.
98 AppendNode(branch);
99 AppendGraph(left);
100 AppendNode(join);
101
102 // Splice in the right flowgraph.
103 right->AppendNode(join);
104 branch->AddSuccessor(right->entry());
105 right->entry()->AddPredecessor(branch);
106 }
107
108
109 void FlowGraph::Loop(JoinNode* join,
110 FlowGraph* condition,
111 BranchNode* branch,
112 FlowGraph* body) {
113 // Add the join, condition and branch. Add join's predecessors in
114 // left-to-right order.
115 AppendNode(join);
116 body->AppendNode(join);
117 AppendGraph(condition);
118 AppendNode(branch);
119
120 // Splice in the body flowgraph.
121 branch->AddSuccessor(body->entry());
122 body->entry()->AddPredecessor(branch);
123 }
124
125
// The exit node has no successors; it is simply recorded in both the
// preorder and postorder lists.
void ExitNode::Traverse(bool mark,
                        ZoneList<Node*>* preorder,
                        ZoneList<Node*>* postorder) {
  preorder->Add(this);
  postorder->Add(this);
}
132
133
134 void BlockNode::Traverse(bool mark,
135 ZoneList<Node*>* preorder,
136 ZoneList<Node*>* postorder) {
137 ASSERT(successor_ != NULL);
138 preorder->Add(this);
139 if (!successor_->IsMarkedWith(mark)) {
140 successor_->MarkWith(mark);
141 successor_->Traverse(mark, preorder, postorder);
142 }
143 postorder->Add(this);
144 }
145
146
147 void BranchNode::Traverse(bool mark,
148 ZoneList<Node*>* preorder,
149 ZoneList<Node*>* postorder) {
150 ASSERT(successor0_ != NULL && successor1_ != NULL);
151 preorder->Add(this);
152 if (!successor1_->IsMarkedWith(mark)) {
153 successor1_->MarkWith(mark);
154 successor1_->Traverse(mark, preorder, postorder);
155 }
156 if (!successor0_->IsMarkedWith(mark)) {
157 successor0_->MarkWith(mark);
158 successor0_->Traverse(mark, preorder, postorder);
159 }
160 postorder->Add(this);
161 }
162
163
164 void JoinNode::Traverse(bool mark,
165 ZoneList<Node*>* preorder,
166 ZoneList<Node*>* postorder) {
167 ASSERT(successor_ != NULL);
168 preorder->Add(this);
169 if (!successor_->IsMarkedWith(mark)) {
170 successor_->MarkWith(mark);
171 successor_->Traverse(mark, preorder, postorder);
172 }
173 postorder->Add(this);
174 }
175
176
// Build the flow graph for a function literal and compute its preorder
// and postorder traversals. If a (real or simulated) stack overflow is
// flagged during the build, the graph is abandoned unfinished.
void FlowGraphBuilder::Build(FunctionLiteral* lit) {
  global_exit_ = new ExitNode();
  VisitStatements(lit->body());

  if (HasStackOverflow()) return;

  // The graph can end with a branch node (if the function ended with a
  // loop). Maintain edge-split form (no join nodes or exit nodes as
  // successors to branch nodes).
  if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
  graph_.AppendNode(global_exit_);

  // Build preorder and postorder traversal orders. All the nodes in
  // the graph have the same mark flag. For the traversal, use that
  // flag's negation. Traversal will flip all the flags.
  bool mark = graph_.entry()->IsMarkedWith(false);
  graph_.entry()->MarkWith(mark);
  graph_.entry()->Traverse(mark, &preorder_, &postorder_);
}
196
197
198 // This function peels off one iteration of a for-loop. The return value
199 // is either a block statement containing the peeled loop or NULL in case
200 // there is a stack overflow.
// This function peels off one iteration of a for-loop. The return value
// is either a block statement containing the peeled loop or NULL in case
// there is a stack overflow (signaled by CopyAstVisitor while deep-copying
// the loop's condition, body, or next parts).
static Statement* PeelForLoop(ForStatement* stmt) {
  // Mark this for-statement as processed.
  stmt->set_peel_this_loop(false);

  // Create new block containing the init statement of the for-loop and
  // an if-statement containing the peeled iteration and the original
  // loop without the init-statement.
  Block* block = new Block(NULL, 2, false);
  if (stmt->init() != NULL) {
    Statement* init = stmt->init();
    // The init statement gets the statement position of the for-loop
    // to make debugging of peeled loops possible.
    init->set_statement_pos(stmt->statement_pos());
    block->AddStatement(init);
  }

  // Copy the condition. A missing condition is treated as constant true.
  CopyAstVisitor copy_visitor;
  Expression* cond_copy = stmt->cond() != NULL
      ? copy_visitor.DeepCopyExpr(stmt->cond())
      : new Literal(Factory::true_value());
  if (copy_visitor.HasStackOverflow()) return NULL;

  // Construct a block with the peeled body and the rest of the for-loop.
  Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
  if (copy_visitor.HasStackOverflow()) return NULL;

  Statement* next_copy = stmt->next() != NULL
      ? copy_visitor.DeepCopyStmt(stmt->next())
      : new EmptyStatement();
  if (copy_visitor.HasStackOverflow()) return NULL;

  // Peeled iteration: copied body, copied next, then the original loop.
  Block* peeled_body = new Block(NULL, 3, false);
  peeled_body->AddStatement(body_copy);
  peeled_body->AddStatement(next_copy);
  peeled_body->AddStatement(stmt);

  // Remove the duplicated init statement from the for-statement.
  stmt->set_init(NULL);

  // Create new test at the top and add it to the newly created block.
  IfStatement* test = new IfStatement(cond_copy,
                                      peeled_body,
                                      new EmptyStatement());
  block->AddStatement(test);
  return block;
}
248
249
250 void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
251 for (int i = 0, len = stmts->length(); i < len; i++) {
252 stmts->at(i) = ProcessStatement(stmts->at(i));
253 }
254 }
255
256
257 Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
258 if (FLAG_loop_peeling &&
259 stmt->AsForStatement() != NULL &&
260 stmt->AsForStatement()->peel_this_loop()) {
261 Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
262 if (tmp_stmt == NULL) {
263 SetStackOverflow();
264 } else {
265 stmt = tmp_stmt;
266 }
267 }
268 Visit(stmt);
269 return stmt;
270 }
271
272
// Declarations should never reach the flow-graph builder.
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
  UNREACHABLE();
}


// A block contributes the flow of its statements, processed in order.
void FlowGraphBuilder::VisitBlock(Block* stmt) {
  VisitStatements(stmt->statements());
}


// An expression statement contributes the flow of its expression.
void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
  Visit(stmt->expression());
}


// An empty statement adds nothing to the flow graph.
void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
  // Nothing to do.
}
291
292
// Translate an if-statement into a branch/join diamond: the condition is
// appended to the current graph, the then- and else-parts are each built
// into their own graph fragments, and the fragments are spliced in around
// a fresh branch and join node.
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
  Visit(stmt->condition());

  BranchNode* branch = new BranchNode();
  // Save the graph built so far; build the then-part into an empty graph.
  FlowGraph original = graph_;
  graph_ = FlowGraph::Empty();
  stmt->set_then_statement(ProcessStatement(stmt->then_statement()));

  // Save the then-fragment; build the else-part into another empty graph.
  FlowGraph left = graph_;
  graph_ = FlowGraph::Empty();
  stmt->set_else_statement(ProcessStatement(stmt->else_statement()));

  // On bailout the partially built fragments are simply abandoned.
  if (HasStackOverflow()) return;
  JoinNode* join = new JoinNode();
  original.Split(branch, &left, &graph_, join);
  graph_ = original;
}
310
311
// The statements below are not modeled by the flow graph. Each visitor
// bails out by setting the stack-overflow flag, which causes the builder
// to abandon the graph (see FlowGraphBuilder::Build).

void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
  SetStackOverflow();
}
350
351
// Translate a for-statement into a loop subgraph: join (loop header),
// condition fragment, branch, and body fragment with a back edge to the
// join. The init part is processed on the current graph; the next part is
// processed as part of the body fragment.
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
  if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));

  JoinNode* join = new JoinNode();
  // Save the graph built so far; build the condition into an empty graph.
  FlowGraph original = graph_;
  graph_ = FlowGraph::Empty();
  if (stmt->cond() != NULL) Visit(stmt->cond());

  BranchNode* branch = new BranchNode();
  // Save the condition fragment; build the body into another empty graph.
  FlowGraph condition = graph_;
  graph_ = FlowGraph::Empty();
  stmt->set_body(ProcessStatement(stmt->body()));

  if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));

  // On bailout the partially built fragments are simply abandoned.
  if (HasStackOverflow()) return;
  original.Loop(join, &condition, branch, &graph_);
  graph_ = original;
}
371
372
// The constructs below are not modeled by the flow graph; each visitor
// bails out via the stack-overflow flag.

void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
    SharedFunctionInfoLiteral* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitConditional(Conditional* expr) {
  SetStackOverflow();
}
407
408
// Slots are never visited directly by the builder.
void FlowGraphBuilder::VisitSlot(Slot* expr) {
  UNREACHABLE();
}


// A variable use becomes a single instruction in the current block.
void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
  graph_.AppendInstruction(expr);
}


// A literal becomes a single instruction in the current block.
void FlowGraphBuilder::VisitLiteral(Literal* expr) {
  graph_.AppendInstruction(expr);
}


// The literals below are not modeled by the flow graph; bail out.
void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
  SetStackOverflow();
}
442
443
// An assignment flows its subexpressions and then itself. An assignment
// to a stack-allocated variable is additionally recorded as a body
// definition and given a definition number for the reaching-definitions
// analysis.
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
  Property* prop = expr->target()->AsProperty();
  // Left-hand side can be a variable or property (or reference error) but
  // not both.
  ASSERT(var == NULL || prop == NULL);
  if (var != NULL) {
    // For compound assignments (e.g. +=) the target is also a use.
    if (expr->is_compound()) Visit(expr->target());
    Visit(expr->value());
    if (var->IsStackAllocated()) {
      // The first definition in the body is numbered n, where n is the
      // number of parameters and stack-allocated locals.
      expr->set_num(body_definitions_.length() + variable_count_);
      body_definitions_.Add(expr);
    }

  } else if (prop != NULL) {
    // Property store: flow the receiver, a non-static key, and the value.
    Visit(prop->obj());
    if (!prop->key()->IsPropertyName()) Visit(prop->key());
    Visit(expr->value());
  }

  if (HasStackOverflow()) return;
  graph_.AppendInstruction(expr);
}
469
470
// Throw is not modeled by the flow graph; bail out.
void FlowGraphBuilder::VisitThrow(Throw* expr) {
  SetStackOverflow();
}
474
475
476 void FlowGraphBuilder::VisitProperty(Property* expr) {
477 Visit(expr->obj());
478 if (!expr->key()->IsPropertyName()) Visit(expr->key());
479
480 if (HasStackOverflow()) return;
481 graph_.AppendInstruction(expr);
482 }
483
484
485 void FlowGraphBuilder::VisitCall(Call* expr) {
486 Visit(expr->expression());
487 ZoneList<Expression*>* arguments = expr->arguments();
488 for (int i = 0, len = arguments->length(); i < len; i++) {
489 Visit(arguments->at(i));
490 }
491
492 if (HasStackOverflow()) return;
493 graph_.AppendInstruction(expr);
494 }
495
496
// Constructor calls and runtime calls are not modeled; bail out.
void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
  SetStackOverflow();
}


void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
  SetStackOverflow();
}
505
506
507 void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
508 switch (expr->op()) {
509 case Token::NOT:
510 case Token::BIT_NOT:
511 case Token::DELETE:
512 case Token::TYPEOF:
513 case Token::VOID:
514 SetStackOverflow();
515 break;
516
517 case Token::ADD:
518 case Token::SUB:
519 Visit(expr->expression());
520 if (HasStackOverflow()) return;
521 graph_.AppendInstruction(expr);
522 break;
523
524 default:
525 UNREACHABLE();
526 }
527 }
528
529
// A count operation (++/--) both uses and defines its operand. When the
// operand is a stack-allocated variable, the expression is recorded as a
// body definition and given a definition number.
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
  Visit(expr->expression());
  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
  if (var != NULL && var->IsStackAllocated()) {
    // The first definition in the body is numbered n, where n is the number
    // of parameters and stack-allocated locals.
    expr->set_num(body_definitions_.length() + variable_count_);
    body_definitions_.Add(expr);
  }

  if (HasStackOverflow()) return;
  graph_.AppendInstruction(expr);
}
543
544
// Arithmetic and bitwise binary operators flow both operands and then the
// operation itself. Comma and the short-circuiting operators have
// nontrivial control flow and cause a bailout.
void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
  switch (expr->op()) {
    case Token::COMMA:
    case Token::OR:
    case Token::AND:
      SetStackOverflow();
      break;

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SHL:
    case Token::SHR:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::SAR:
      Visit(expr->left());
      Visit(expr->right());
      if (HasStackOverflow()) return;
      graph_.AppendInstruction(expr);
      break;

    default:
      UNREACHABLE();
  }
}
574
575
// Only the relational comparisons (<, >, <=, >=) are modeled: both
// operands are flowed, then the comparison itself. Equality, instanceof
// and 'in' cause a bailout.
void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
  switch (expr->op()) {
    case Token::EQ:
    case Token::NE:
    case Token::EQ_STRICT:
    case Token::NE_STRICT:
    case Token::INSTANCEOF:
    case Token::IN:
      SetStackOverflow();
      break;

    case Token::LT:
    case Token::GT:
    case Token::LTE:
    case Token::GTE:
      Visit(expr->left());
      Visit(expr->right());
      if (HasStackOverflow()) return;
      graph_.AppendInstruction(expr);
      break;

    default:
      UNREACHABLE();
  }
}
601
602
// References to the current function are not modeled; bail out.
void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
  SetStackOverflow();
}
606
607
// Entry point: run the labeler over the body of the function in the given
// compilation info.
void AstLabeler::Label(CompilationInfo* info) {
  info_ = info;
  VisitStatements(info_->function()->body());
}
612 58
613 59
614 void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) { 60 void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
615 for (int i = 0, len = stmts->length(); i < len; i++) { 61 for (int i = 0, len = stmts->length(); i < len; i++) {
616 Visit(stmts->at(i)); 62 Visit(stmts->at(i));
617 } 63 }
(...skipping 675 matching lines...) Expand 10 before | Expand all | Expand 10 after
1293 // Nothing to do. 739 // Nothing to do.
1294 ASSERT(av_.IsEmpty()); 740 ASSERT(av_.IsEmpty());
1295 } 741 }
1296 742
1297 743
// Declarations should never reach the assigned-variables analysis.
void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
  UNREACHABLE();
}
1301 747
1302 748
749 int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
750 // Parameters are numbered left-to-right from the beginning of the bit
751 // set. Stack-allocated locals are allocated right-to-left from the end.
752 ASSERT(var != NULL && var->IsStackAllocated());
753 Slot* slot = var->slot();
754 if (slot->type() == Slot::PARAMETER) {
755 return slot->index();
756 } else {
757 return (variable_count - 1) - slot->index();
758 }
759 }
760
761
// Default initialization for nodes without instructions: allocate the
// reaching-definitions bit sets, mark the node, and schedule it on the
// worklist. Block nodes override this to also compute GEN/KILL.
void Node::InitializeReachingDefinitions(int definition_count,
                                         List<BitVector*>* variables,
                                         WorkList<Node>* worklist,
                                         bool mark) {
  ASSERT(!IsMarkedWith(mark));
  rd_.Initialize(definition_count);
  MarkWith(mark);
  worklist->Insert(this);
}
771
772
// Compute GEN and KILL over the block's instruction sequence, initialize
// RD_in, and schedule every block except the entry block on the worklist.
void BlockNode::InitializeReachingDefinitions(int definition_count,
                                              List<BitVector*>* variables,
                                              WorkList<Node>* worklist,
                                              bool mark) {
  ASSERT(!IsMarkedWith(mark));
  int instruction_count = instructions_.length();
  int variable_count = variables->length();

  rd_.Initialize(definition_count);
  // The RD_in set for the entry node has a definition for each parameter
  // and local.
  if (predecessor_ == NULL) {
    for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
  }

  // Scan instructions in order; only expressions that assign to a
  // stack-allocated variable affect GEN/KILL.
  for (int i = 0; i < instruction_count; i++) {
    Expression* expr = instructions_[i]->AsExpression();
    if (expr == NULL) continue;
    Variable* var = expr->AssignedVariable();
    if (var == NULL || !var->IsStackAllocated()) continue;

    // All definitions of this variable are killed.
    BitVector* def_set =
        variables->at(ReachingDefinitions::IndexFor(var, variable_count));
    rd_.kill()->Union(*def_set);

    // All previously generated definitions are not generated.
    rd_.gen()->Subtract(*def_set);

    // This one is generated.
    rd_.gen()->Add(expr->num());
  }

  // Add all blocks except the entry node to the worklist.
  if (predecessor_ != NULL) {
    MarkWith(mark);
    worklist->Insert(this);
  }
}
812
813
// The exit node's RD_out is never needed.
void ExitNode::ComputeRDOut(BitVector* result) {
  // Should not be the predecessor of any node.
  UNREACHABLE();
}
818
819
// RD_out = (RD_in - KILL) union GEN.
void BlockNode::ComputeRDOut(BitVector* result) {
  // All definitions reaching this block ...
  *result = *rd_.rd_in();
  // ... except those killed by the block ...
  result->Subtract(*rd_.kill());
  // ... but including those generated by the block.
  result->Union(*rd_.gen());
}
828
829
// RD_out = RD_in for branch and join nodes: they carry no instructions.
void BranchNode::ComputeRDOut(BitVector* result) {
  // Branch nodes don't kill or generate definitions.
  *result = *rd_.rd_in();
}


void JoinNode::ComputeRDOut(BitVector* result) {
  // Join nodes don't kill or generate definitions.
  *result = *rd_.rd_in();
}
840
841
// Recompute RD_in for the exit node as the union of its predecessors'
// RD_out sets. No successors exist, so nothing is rescheduled.
void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
  // The exit node has no successors so we can just update in place. New
  // RD_in is the union over all predecessors.
  int definition_count = rd_.rd_in()->length();
  rd_.rd_in()->Clear();

  BitVector temp(definition_count);
  for (int i = 0, len = predecessors_.length(); i < len; i++) {
    // Because ComputeRDOut always overwrites temp and its value is
    // always read out before calling ComputeRDOut again, we do not
    // have to clear it on each iteration of the loop.
    predecessors_[i]->ComputeRDOut(&temp);
    rd_.rd_in()->Union(temp);
  }
}
857
858
// Recompute RD_in from the single predecessor's RD_out; if it changed,
// store it and reschedule the successor on the worklist.
void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
  // The entry block has no predecessor. Its RD_in does not change.
  if (predecessor_ == NULL) return;

  BitVector new_rd_in(rd_.rd_in()->length());
  predecessor_->ComputeRDOut(&new_rd_in);

  // Fixed point reached for this node: nothing to propagate.
  if (rd_.rd_in()->Equals(new_rd_in)) return;

  // Update RD_in.
  *rd_.rd_in() = new_rd_in;
  // Add the successor to the worklist if not already present.
  if (!successor_->IsMarkedWith(mark)) {
    successor_->MarkWith(mark);
    worklist->Insert(successor_);
  }
}
876
877
// Recompute RD_in from the single predecessor's RD_out; if it changed,
// store it and reschedule both successors on the worklist.
void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
  BitVector new_rd_in(rd_.rd_in()->length());
  predecessor_->ComputeRDOut(&new_rd_in);

  // Fixed point reached for this node: nothing to propagate.
  if (rd_.rd_in()->Equals(new_rd_in)) return;

  // Update RD_in.
  *rd_.rd_in() = new_rd_in;
  // Add the successors to the worklist if not already present.
  if (!successor0_->IsMarkedWith(mark)) {
    successor0_->MarkWith(mark);
    worklist->Insert(successor0_);
  }
  if (!successor1_->IsMarkedWith(mark)) {
    successor1_->MarkWith(mark);
    worklist->Insert(successor1_);
  }
}
896
897
// Recompute RD_in as the union of all predecessors' RD_out sets; if it
// changed, store it and reschedule the successor on the worklist.
void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
  int definition_count = rd_.rd_in()->length();
  BitVector new_rd_in(definition_count);

  // New RD_in is the union over all predecessors.
  BitVector temp(definition_count);
  for (int i = 0, len = predecessors_.length(); i < len; i++) {
    predecessors_[i]->ComputeRDOut(&temp);
    new_rd_in.Union(temp);
  }

  // Fixed point reached for this node: nothing to propagate.
  if (rd_.rd_in()->Equals(new_rd_in)) return;

  // Update RD_in.
  *rd_.rd_in() = new_rd_in;
  // Add the successor to the worklist if not already present.
  if (!successor_->IsMarkedWith(mark)) {
    successor_->MarkWith(mark);
    worklist->Insert(successor_);
  }
}
919
920
// Only block nodes contain variable references; the default does nothing.
void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
  // Nothing to do.
}
924
925
// Walk the block's instructions with a running reaching-definitions set,
// attaching to each stack-allocated variable use the set of definitions
// that reach it, and updating the running set at each definition.
void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
  // Propagate RD_in from the start of the block to all the variable
  // references.
  int variable_count = variables->length();
  BitVector rd = *rd_.rd_in();
  for (int i = 0, len = instructions_.length(); i < len; i++) {
    Expression* expr = instructions_[i]->AsExpression();
    if (expr == NULL) continue;

    // Look for a variable reference to record its reaching definitions.
    VariableProxy* proxy = expr->AsVariableProxy();
    if (proxy == NULL) {
      // Not a VariableProxy? Maybe it's a count operation.
      CountOperation* count_operation = expr->AsCountOperation();
      if (count_operation != NULL) {
        proxy = count_operation->expression()->AsVariableProxy();
      }
    }
    if (proxy == NULL) {
      // OK, Maybe it's a compound assignment.
      Assignment* assignment = expr->AsAssignment();
      if (assignment != NULL && assignment->is_compound()) {
        proxy = assignment->target()->AsVariableProxy();
      }
    }

    if (proxy != NULL &&
        proxy->var()->IsStackAllocated() &&
        !proxy->var()->is_this()) {
      // All definitions for this variable.
      BitVector* definitions =
          variables->at(ReachingDefinitions::IndexFor(proxy->var(),
                                                      variable_count));
      BitVector* reaching_definitions = new BitVector(*definitions);
      // Intersected with all definitions (of any variable) reaching this
      // instruction.
      reaching_definitions->Intersect(rd);
      proxy->set_reaching_definitions(reaching_definitions);
    }

    // It may instead (or also) be a definition. If so update the running
    // value of reaching definitions for the block.
    Variable* var = expr->AssignedVariable();
    if (var == NULL || !var->IsStackAllocated()) continue;

    // All definitions of this variable are killed.
    BitVector* def_set =
        variables->at(ReachingDefinitions::IndexFor(var, variable_count));
    rd.Subtract(*def_set);
    // This definition is generated.
    rd.Add(expr->num());
  }
}
979
980
// Classic iterative reaching-definitions analysis over the flow graph:
// build per-variable definition sets, initialize GEN/KILL/RD_in, iterate
// a worklist to a fixed point, then attach results to variable uses.
void ReachingDefinitions::Compute() {
  // The definitions in the body plus an implicit definition for each
  // variable at function entry.
  int definition_count = body_definitions_->length() + variable_count_;
  int node_count = postorder_->length();

  // Step 1: For each stack-allocated variable, identify the set of all its
  // definitions.
  List<BitVector*> variables;
  for (int i = 0; i < variable_count_; i++) {
    // Add the initial definition for each variable.
    BitVector* initial = new BitVector(definition_count);
    initial->Add(i);
    variables.Add(initial);
  }
  for (int i = 0, len = body_definitions_->length(); i < len; i++) {
    // Account for each definition in the body as a definition of the
    // defined variable.
    Variable* var = body_definitions_->at(i)->AssignedVariable();
    variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
  }

  // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
  // all nodes, and mark and add all nodes to the worklist in reverse
  // postorder. All nodes should currently have the same mark.
  bool mark = postorder_->at(0)->IsMarkedWith(false);  // Negation of current.
  WorkList<Node> worklist(node_count);
  for (int i = node_count - 1; i >= 0; i--) {
    postorder_->at(i)->InitializeReachingDefinitions(definition_count,
                                                     &variables,
                                                     &worklist,
                                                     mark);
  }

  // Step 3: Until the worklist is empty, remove an item compute and update
  // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
  // all necessary successors to the worklist.
  while (!worklist.is_empty()) {
    Node* node = worklist.Remove();
    node->MarkWith(!mark);
    node->UpdateRDIn(&worklist, mark);
  }

  // Step 4: Based on RD_in for block nodes, propagate reaching definitions
  // to all variable uses in the block.
  for (int i = 0; i < node_count; i++) {
    postorder_->at(i)->PropagateReachingDefinitions(&variables);
  }
}
1030
1031
// Whether definition |def_num| is known to produce a primitive value.
// Implicit parameter definitions (indices below param_count_) are treated
// as non-primitive; implicit local definitions as primitive; body
// definitions are asked directly.
bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
  if (def_num < param_count_) return false;
  if (def_num < variable_count_) return true;
  return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
}
1037
1038
// Iterate over the graph in reverse postorder, recomputing for every
// variable use whether all its reaching definitions are primitive, until
// no use's primitiveness changes (fixed point).
void TypeAnalyzer::Compute() {
  bool changed;
  int count = 0;

  do {
    changed = false;

    if (FLAG_print_graph_text) {
      PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
    }

    for (int i = postorder_->length() - 1; i >= 0; --i) {
      Node* node = postorder_->at(i);
      if (node->IsBlockNode()) {
        BlockNode* block = BlockNode::cast(node);
        for (int j = 0; j < block->instructions()->length(); j++) {
          Expression* expr = block->instructions()->at(j)->AsExpression();
          if (expr != NULL) {
            // For variable uses: Compute new type from reaching definitions.
            VariableProxy* proxy = expr->AsVariableProxy();
            if (proxy != NULL && proxy->reaching_definitions() != NULL) {
              BitVector* rd = proxy->reaching_definitions();
              // The use is primitive only if every reaching definition is.
              bool prim_type = true;
              // TODO(fsc): A sparse set representation of reaching
              // definitions would speed up iterating here.
              for (int k = 0; k < rd->length(); k++) {
                if (rd->Contains(k) && !IsPrimitiveDef(k)) {
                  prim_type = false;
                  break;
                }
              }
              // Reset changed flag if new type information was computed.
              if (prim_type != proxy->IsPrimitive()) {
                changed = true;
                proxy->SetIsPrimitive(prim_type);
              }
            }
          }
        }
      }
    }
  } while (changed);
}
1082
1083
// Only block nodes contain instructions; the default does nothing.
void Node::MarkCriticalInstructions(
    List<AstNode*>* stack,
    ZoneList<Expression*>* body_definitions,
    int variable_count) {
}
1089
1090
// Mark as live every not-yet-live instruction that is critical (or a loop
// condition), and push its not-yet-live children for further processing.
// Instructions are scanned in reverse so uses are seen before definitions.
void BlockNode::MarkCriticalInstructions(
    List<AstNode*>* stack,
    ZoneList<Expression*>* body_definitions,
    int variable_count) {
  for (int i = instructions_.length() - 1; i >= 0; i--) {
    // Only expressions can appear in the flow graph for now.
    Expression* expr = instructions_[i]->AsExpression();
    if (expr != NULL && !expr->is_live() &&
        (expr->is_loop_condition() || expr->IsCritical())) {
      expr->mark_as_live();
      expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
    }
  }
}
1105
1106
// Liveness marking pass: seed the marking stack with the critical
// instructions of every node, then transitively mark their dependencies
// until the stack is empty.
void MarkLiveCode(ZoneList<Node*>* nodes,
                  ZoneList<Expression*>* body_definitions,
                  int variable_count) {
  List<AstNode*> stack(20);

  // Mark the critical AST nodes as live; mark their dependencies and
  // add them to the marking stack.
  for (int i = nodes->length() - 1; i >= 0; i--) {
    nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
                                           variable_count);
  }

  // Continue marking dependencies until no more.
  while (!stack.is_empty()) {
    // Only expressions can appear in the flow graph for now.
    Expression* expr = stack.RemoveLast()->AsExpression();
    if (expr != NULL) {
      expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
    }
  }
}
1128
1129
1303 #ifdef DEBUG 1130 #ifdef DEBUG
1304 1131
1305 // Print a textual representation of an instruction in a flow graph. Using 1132 // Print a textual representation of an instruction in a flow graph. Using
1306 // the AstVisitor is overkill because there is no recursion here. It is 1133 // the AstVisitor is overkill because there is no recursion here. It is
1307 // only used for printing in debug mode. 1134 // only used for printing in debug mode.
1308 class TextInstructionPrinter: public AstVisitor { 1135 class TextInstructionPrinter: public AstVisitor {
1309 public: 1136 public:
1310 TextInstructionPrinter() : number_(0) {} 1137 TextInstructionPrinter() : number_(0) {}
1311 1138
1312 int NextNumber() { return number_; } 1139 int NextNumber() { return number_; }
(...skipping 394 matching lines...) Expand 10 before | Expand all | Expand 10 after
1707 for (int i = postorder->length() - 1; i >= 0; i--) { 1534 for (int i = postorder->length() - 1; i >= 0; i--) {
1708 postorder->at(i)->AssignNodeNumber(); 1535 postorder->at(i)->AssignNodeNumber();
1709 } 1536 }
1710 1537
1711 // Print basic blocks in reverse postorder. 1538 // Print basic blocks in reverse postorder.
1712 for (int i = postorder->length() - 1; i >= 0; i--) { 1539 for (int i = postorder->length() - 1; i >= 0; i--) {
1713 postorder->at(i)->PrintText(); 1540 postorder->at(i)->PrintText();
1714 } 1541 }
1715 } 1542 }
1716 1543
1717 1544 #endif // DEBUG
1718 #endif // defined(DEBUG)
1719
1720
1721 int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
1722 // Parameters are numbered left-to-right from the beginning of the bit
1723 // set. Stack-allocated locals are allocated right-to-left from the end.
1724 ASSERT(var != NULL && var->IsStackAllocated());
1725 Slot* slot = var->slot();
1726 if (slot->type() == Slot::PARAMETER) {
1727 return slot->index();
1728 } else {
1729 return (variable_count - 1) - slot->index();
1730 }
1731 }
1732
1733
// Base-class initialization for nodes that neither generate nor kill
// definitions: allocate empty RD sets sized for the full definition
// count, flip the node's mark, and queue it for the fixed-point
// iteration. (The variables list is unused here; BlockNode's override
// uses it to build gen/kill sets.)
void Node::InitializeReachingDefinitions(int definition_count,
                                         List<BitVector*>* variables,
                                         WorkList<Node>* worklist,
                                         bool mark) {
  ASSERT(!IsMarkedWith(mark));
  rd_.Initialize(definition_count);
  MarkWith(mark);
  worklist->Insert(this);
}
1743
1744
// Compute this block's GEN and KILL sets and initialize RD_in. Each
// instruction that assigns a stack-allocated variable kills all other
// definitions of that variable and generates its own definition number
// (expr->num()). Processing instructions in forward order keeps gen
// correct: a later assignment to the same variable subtracts the
// earlier one from gen before adding itself.
void BlockNode::InitializeReachingDefinitions(int definition_count,
                                              List<BitVector*>* variables,
                                              WorkList<Node>* worklist,
                                              bool mark) {
  ASSERT(!IsMarkedWith(mark));
  int instruction_count = instructions_.length();
  int variable_count = variables->length();

  rd_.Initialize(definition_count);
  // The RD_in set for the entry node has a definition for each parameter
  // and local.
  if (predecessor_ == NULL) {
    for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
  }

  for (int i = 0; i < instruction_count; i++) {
    Expression* expr = instructions_[i]->AsExpression();
    if (expr == NULL) continue;
    Variable* var = expr->AssignedVariable();
    if (var == NULL || !var->IsStackAllocated()) continue;

    // All definitions of this variable are killed.
    BitVector* def_set =
        variables->at(ReachingDefinitions::IndexFor(var, variable_count));
    rd_.kill()->Union(*def_set);

    // All previously generated definitions are not generated.
    rd_.gen()->Subtract(*def_set);

    // This one is generated.
    rd_.gen()->Add(expr->num());
  }

  // Add all blocks except the entry node to the worklist. The entry
  // node's RD_in is fixed above and never needs recomputation.
  if (predecessor_ != NULL) {
    MarkWith(mark);
    worklist->Insert(this);
  }
}
1784
1785
// RD_out is only ever requested by a node's successors. The exit node
// has no successors, so this must never be called.
void ExitNode::ComputeRDOut(BitVector* result) {
  // Should not be the predecessor of any node.
  UNREACHABLE();
}
1790
1791
// Standard dataflow transfer function for a basic block:
// RD_out = (RD_in - KILL) union GEN. The subtraction must happen
// before the union so definitions generated in the block survive.
void BlockNode::ComputeRDOut(BitVector* result) {
  // All definitions reaching this block ...
  *result = *rd_.rd_in();
  // ... except those killed by the block ...
  result->Subtract(*rd_.kill());
  // ... but including those generated by the block.
  result->Union(*rd_.gen());
}
1800
1801
// Branch nodes hold no instructions, so their transfer function is the
// identity: RD_out == RD_in.
void BranchNode::ComputeRDOut(BitVector* result) {
  // Branch nodes don't kill or generate definitions.
  *result = *rd_.rd_in();
}
1806
1807
// Join nodes hold no instructions, so their transfer function is the
// identity: RD_out == RD_in.
void JoinNode::ComputeRDOut(BitVector* result) {
  // Join nodes don't kill or generate definitions.
  *result = *rd_.rd_in();
}
1812
1813
1814 void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
1815 // The exit node has no successors so we can just update in place. New
1816 // RD_in is the union over all predecessors.
1817 int definition_count = rd_.rd_in()->length();
1818 rd_.rd_in()->Clear();
1819
1820 BitVector temp(definition_count);
1821 for (int i = 0, len = predecessors_.length(); i < len; i++) {
1822 // Because ComputeRDOut always overwrites temp and its value is
1823 // always read out before calling ComputeRDOut again, we do not
1824 // have to clear it on each iteration of the loop.
1825 predecessors_[i]->ComputeRDOut(&temp);
1826 rd_.rd_in()->Union(temp);
1827 }
1828 }
1829
1830
1831 void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
1832 // The entry block has no predecessor. Its RD_in does not change.
1833 if (predecessor_ == NULL) return;
1834
1835 BitVector new_rd_in(rd_.rd_in()->length());
1836 predecessor_->ComputeRDOut(&new_rd_in);
1837
1838 if (rd_.rd_in()->Equals(new_rd_in)) return;
1839
1840 // Update RD_in.
1841 *rd_.rd_in() = new_rd_in;
1842 // Add the successor to the worklist if not already present.
1843 if (!successor_->IsMarkedWith(mark)) {
1844 successor_->MarkWith(mark);
1845 worklist->Insert(successor_);
1846 }
1847 }
1848
1849
1850 void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
1851 BitVector new_rd_in(rd_.rd_in()->length());
1852 predecessor_->ComputeRDOut(&new_rd_in);
1853
1854 if (rd_.rd_in()->Equals(new_rd_in)) return;
1855
1856 // Update RD_in.
1857 *rd_.rd_in() = new_rd_in;
1858 // Add the successors to the worklist if not already present.
1859 if (!successor0_->IsMarkedWith(mark)) {
1860 successor0_->MarkWith(mark);
1861 worklist->Insert(successor0_);
1862 }
1863 if (!successor1_->IsMarkedWith(mark)) {
1864 successor1_->MarkWith(mark);
1865 worklist->Insert(successor1_);
1866 }
1867 }
1868
1869
1870 void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
1871 int definition_count = rd_.rd_in()->length();
1872 BitVector new_rd_in(definition_count);
1873
1874 // New RD_in is the union over all predecessors.
1875 BitVector temp(definition_count);
1876 for (int i = 0, len = predecessors_.length(); i < len; i++) {
1877 predecessors_[i]->ComputeRDOut(&temp);
1878 new_rd_in.Union(temp);
1879 }
1880
1881 if (rd_.rd_in()->Equals(new_rd_in)) return;
1882
1883 // Update RD_in.
1884 *rd_.rd_in() = new_rd_in;
1885 // Add the successor to the worklist if not already present.
1886 if (!successor_->IsMarkedWith(mark)) {
1887 successor_->MarkWith(mark);
1888 worklist->Insert(successor_);
1889 }
1890 }
1891
1892
// Base-class default: non-block nodes contain no instructions, so
// there are no variable uses to annotate with reaching definitions.
void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
  // Nothing to do.
}
1896
1897
// Walk the block's instructions in order, maintaining the running set
// of definitions that reach each one (starting from RD_in). For every
// use of a stack-allocated variable, record the intersection of that
// running set with the variable's full definition set; for every
// definition, update the running set (kill then gen).
void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
  // Propagate RD_in from the start of the block to all the variable
  // references.
  int variable_count = variables->length();
  BitVector rd = *rd_.rd_in();
  for (int i = 0, len = instructions_.length(); i < len; i++) {
    Expression* expr = instructions_[i]->AsExpression();
    if (expr == NULL) continue;

    // Look for a variable reference to record its reaching definitions.
    VariableProxy* proxy = expr->AsVariableProxy();
    if (proxy == NULL) {
      // Not a VariableProxy? Maybe it's a count operation.
      CountOperation* count_operation = expr->AsCountOperation();
      if (count_operation != NULL) {
        proxy = count_operation->expression()->AsVariableProxy();
      }
    }
    if (proxy == NULL) {
      // OK, Maybe it's a compound assignment, which also reads its
      // target before writing it.
      Assignment* assignment = expr->AsAssignment();
      if (assignment != NULL && assignment->is_compound()) {
        proxy = assignment->target()->AsVariableProxy();
      }
    }

    // Note that uses of 'this' are deliberately excluded.
    if (proxy != NULL &&
        proxy->var()->IsStackAllocated() &&
        !proxy->var()->is_this()) {
      // All definitions for this variable.
      BitVector* definitions =
          variables->at(ReachingDefinitions::IndexFor(proxy->var(),
                                                      variable_count));
      BitVector* reaching_definitions = new BitVector(*definitions);
      // Intersected with all definitions (of any variable) reaching this
      // instruction.
      reaching_definitions->Intersect(rd);
      proxy->set_reaching_definitions(reaching_definitions);
    }

    // It may instead (or also) be a definition. If so update the running
    // value of reaching definitions for the block.
    Variable* var = expr->AssignedVariable();
    if (var == NULL || !var->IsStackAllocated()) continue;

    // All definitions of this variable are killed.
    BitVector* def_set =
        variables->at(ReachingDefinitions::IndexFor(var, variable_count));
    rd.Subtract(*def_set);
    // This definition is generated.
    rd.Add(expr->num());
  }
}
1951
1952
// Classic iterative (worklist) reaching-definitions analysis over the
// flow graph. Definition numbering: [0, variable_count_) are implicit
// entry definitions, one per parameter/local; the rest are the explicit
// definitions collected in body_definitions_, offset by variable_count_.
void ReachingDefinitions::Compute() {
  // The definitions in the body plus an implicit definition for each
  // variable at function entry.
  int definition_count = body_definitions_->length() + variable_count_;
  int node_count = postorder_->length();

  // Step 1: For each stack-allocated variable, identify the set of all its
  // definitions.
  List<BitVector*> variables;
  for (int i = 0; i < variable_count_; i++) {
    // Add the initial definition for each variable.
    BitVector* initial = new BitVector(definition_count);
    initial->Add(i);
    variables.Add(initial);
  }
  for (int i = 0, len = body_definitions_->length(); i < len; i++) {
    // Account for each definition in the body as a definition of the
    // defined variable.
    Variable* var = body_definitions_->at(i)->AssignedVariable();
    variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
  }

  // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
  // all nodes, and mark and add all nodes to the worklist in reverse
  // postorder. All nodes should currently have the same mark, so the
  // negated mark distinguishes "queued" from "not queued" below.
  bool mark = postorder_->at(0)->IsMarkedWith(false);  // Negation of current.
  WorkList<Node> worklist(node_count);
  for (int i = node_count - 1; i >= 0; i--) {
    postorder_->at(i)->InitializeReachingDefinitions(definition_count,
                                                     &variables,
                                                     &worklist,
                                                     mark);
  }

  // Step 3: Until the worklist is empty, remove an item, compute and update
  // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
  // all necessary successors to the worklist.
  while (!worklist.is_empty()) {
    Node* node = worklist.Remove();
    node->MarkWith(!mark);  // Unmark: the node is no longer queued.
    node->UpdateRDIn(&worklist, mark);
  }

  // Step 4: Based on RD_in for block nodes, propagate reaching definitions
  // to all variable uses in the block.
  for (int i = 0; i < node_count; i++) {
    postorder_->at(i)->PropagateReachingDefinitions(&variables);
  }
}
2002
2003
2004 bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
2005 if (def_num < param_count_) return false;
2006 if (def_num < variable_count_) return true;
2007 return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
2008 }
2009
2010
// Fixed-point propagation of primitive-ness: a variable use is
// primitive iff every definition reaching it is primitive. Iterates
// over all block instructions until no proxy's flag changes.
void TypeAnalyzer::Compute() {
  bool changed;
  int count = 0;  // Iteration counter, used only for tracing below.

  do {
    changed = false;

    if (FLAG_print_graph_text) {
      PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
    }

    // Visit nodes in reverse postorder.
    for (int i = postorder_->length() - 1; i >= 0; --i) {
      Node* node = postorder_->at(i);
      if (node->IsBlockNode()) {
        BlockNode* block = BlockNode::cast(node);
        for (int j = 0; j < block->instructions()->length(); j++) {
          Expression* expr = block->instructions()->at(j)->AsExpression();
          if (expr != NULL) {
            // For variable uses: Compute new type from reaching definitions.
            VariableProxy* proxy = expr->AsVariableProxy();
            if (proxy != NULL && proxy->reaching_definitions() != NULL) {
              BitVector* rd = proxy->reaching_definitions();
              // Primitive unless some reaching definition is not.
              bool prim_type = true;
              // TODO(fsc): A sparse set representation of reaching
              // definitions would speed up iterating here.
              for (int k = 0; k < rd->length(); k++) {
                if (rd->Contains(k) && !IsPrimitiveDef(k)) {
                  prim_type = false;
                  break;
                }
              }
              // Record the new type and request another iteration if it
              // differs from the proxy's current type.
              if (prim_type != proxy->IsPrimitive()) {
                changed = true;
                proxy->SetIsPrimitive(prim_type);
              }
            }
          }
        }
      }
    }
  } while (changed);
}
2054
2055
// Default no-op: only BlockNode holds instructions, so other node
// kinds have nothing to mark.
void Node::MarkCriticalInstructions(
    List<AstNode*>* stack,
    ZoneList<Expression*>* body_definitions,
    int variable_count) {
}
2061
2062
// Mark this block's critical expressions (and loop conditions) live,
// scanning backwards, and push their non-live dependencies onto the
// marking stack for MarkLiveCode to process.
void BlockNode::MarkCriticalInstructions(
    List<AstNode*>* stack,
    ZoneList<Expression*>* body_definitions,
    int variable_count) {
  for (int i = instructions_.length() - 1; i >= 0; i--) {
    // Only expressions can appear in the flow graph for now.
    Expression* expr = instructions_[i]->AsExpression();
    if (expr != NULL && !expr->is_live() &&
        (expr->is_loop_condition() || expr->IsCritical())) {
      expr->mark_as_live();
      expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
    }
  }
}
2077
2078
// Liveness marking driver: seed a stack with every node's critical
// instructions, then transitively mark everything those depend on
// until the stack is empty.
void MarkLiveCode(ZoneList<Node*>* nodes,
                  ZoneList<Expression*>* body_definitions,
                  int variable_count) {
  List<AstNode*> stack(20);

  // Mark the critical AST nodes as live; mark their dependencies and
  // add them to the marking stack.
  for (int i = nodes->length() - 1; i >= 0; i--) {
    nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
                                           variable_count);
  }

  // Continue marking dependencies until no more.
  while (!stack.is_empty()) {
    // Only expressions can appear in the flow graph for now.
    Expression* expr = stack.RemoveLast()->AsExpression();
    if (expr != NULL) {
      expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
    }
  }
}
2100 1545
2101 1546
2102 } } // namespace v8::internal 1547 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/data-flow.h ('k') | src/flow-graph.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698