Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(255)

Side by Side Diff: src/compiler/control-reducer.cc

Issue 665223006: Implement control reducer, which reduces branches and phis together in a single fixpoint. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/compiler/control-reducer.h ('k') | test/cctest/compiler/test-control-reducer.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/common-operator.h" 5 #include "src/compiler/common-operator.h"
6 #include "src/compiler/control-reducer.h" 6 #include "src/compiler/control-reducer.h"
7 #include "src/compiler/graph.h" 7 #include "src/compiler/graph.h"
8 #include "src/compiler/js-graph.h" 8 #include "src/compiler/js-graph.h"
9 #include "src/compiler/node-matchers.h" 9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h" 10 #include "src/compiler/node-properties-inl.h"
11 #include "src/zone-containers.h" 11 #include "src/zone-containers.h"
12 12
13 namespace v8 { 13 namespace v8 {
14 namespace internal { 14 namespace internal {
15 namespace compiler { 15 namespace compiler {
16 16
17 enum VisitState { kUnvisited, kOnStack, kRevisit, kVisited }; 17 enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
18 enum Reachability { kFromStart = 8 };
18 19
19 #define TRACE(x) \ 20 #define TRACE(x) \
20 if (FLAG_trace_turbo) PrintF x 21 if (FLAG_trace_turbo) PrintF x
21 22
22 class ControlReducerImpl { 23 class ControlReducerImpl {
23 public: 24 public:
24 ControlReducerImpl(Zone* zone, JSGraph* jsgraph, 25 ControlReducerImpl(Zone* zone, JSGraph* jsgraph,
25 CommonOperatorBuilder* common) 26 CommonOperatorBuilder* common)
26 : zone_(zone), 27 : zone_(zone),
27 jsgraph_(jsgraph), 28 jsgraph_(jsgraph),
28 common_(common), 29 common_(common),
29 state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_), 30 state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_),
30 stack_(zone_), 31 stack_(zone_),
31 revisit_(zone_), 32 revisit_(zone_),
32 dead_(NULL) {} 33 dead_(NULL) {}
33 34
34 Zone* zone_; 35 Zone* zone_;
35 JSGraph* jsgraph_; 36 JSGraph* jsgraph_;
36 CommonOperatorBuilder* common_; 37 CommonOperatorBuilder* common_;
37 ZoneVector<VisitState> state_; 38 ZoneVector<VisitState> state_;
38 ZoneDeque<Node*> stack_; 39 ZoneDeque<Node*> stack_;
39 ZoneDeque<Node*> revisit_; 40 ZoneDeque<Node*> revisit_;
40 Node* dead_; 41 Node* dead_;
41 42
42 void Trim() { 43 void Reduce() {
43 // Mark all nodes reachable from end. 44 Push(graph()->end());
45 do {
46 // Process the node on the top of the stack, potentially pushing more
47 // or popping the node off the stack.
48 ReduceTop();
49 // If the stack becomes empty, revisit any nodes in the revisit queue.
50 // If no nodes in the revisit queue, try removing dead loops.
51 // If no dead loops, then finish.
52 } while (!stack_.empty() || TryRevisit() || RepairAndRemoveLoops());
53 }
54
55 bool TryRevisit() {
56 while (!revisit_.empty()) {
57 Node* n = revisit_.back();
58 revisit_.pop_back();
59 if (state_[n->id()] == kRevisit) { // state can change while in queue.
60 Push(n);
61 return true;
62 }
63 }
64 return false;
65 }
66
67 // Repair the graph after the possible creation of non-terminating or dead
68 // loops. Removing dead loops can produce more opportunities for reduction.
69 bool RepairAndRemoveLoops() {
70 // TODO(turbofan): we can skip this if the graph has no loops, but
71 // we have to be careful about proper loop detection during reduction.
72
73 // Gather all nodes backwards-reachable from end (through inputs).
74 state_.assign(graph()->NodeCount(), kUnvisited);
44 NodeVector nodes(zone_); 75 NodeVector nodes(zone_);
45 state_.assign(jsgraph_->graph()->NodeCount(), kUnvisited); 76 AddNodesReachableFromEnd(nodes);
46 Push(jsgraph_->graph()->end()); 77
78 // Walk forward through control nodes, looking for back edges to nodes
79 // that are not connected to end. Those are non-terminating loops (NTLs).
80 Node* start = graph()->start();
81 ZoneVector<byte> fw_reachability(graph()->NodeCount(), 0, zone_);
82 fw_reachability[start->id()] = kFromStart | kOnStack;
83 stack_.push_back(start);
84
47 while (!stack_.empty()) { 85 while (!stack_.empty()) {
48 Node* node = stack_[stack_.size() - 1]; 86 Node* node = stack_.back();
49 stack_.pop_back(); 87 TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
50 state_[node->id()] = kVisited; 88 bool pop = true;
51 nodes.push_back(node); 89 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
90 Node* succ = *i;
91 byte reach = fw_reachability[succ->id()];
92 if ((reach & kOnStack) != 0 && state_[succ->id()] != kVisited) {
93 // {succ} is on stack and not reachable from end.
94 ConnectNTL(nodes, succ);
95 fw_reachability.resize(graph()->NodeCount(), 0);
96 pop = false; // continue traversing inputs to this node.
97 break;
98 }
99 if ((reach & kFromStart) == 0 &&
100 IrOpcode::IsControlOpcode(succ->opcode())) {
101 // {succ} is a control node and not yet reached from start.
102 fw_reachability[succ->id()] |= kFromStart | kOnStack;
103 stack_.push_back(succ);
104 pop = false; // "recurse" into successor control node.
105 break;
106 }
107 }
108 if (pop) {
109 fw_reachability[node->id()] &= ~kOnStack;
110 stack_.pop_back();
111 }
112 }
113
114 // Trim references from dead nodes to live nodes first.
115 jsgraph_->GetCachedNodes(&nodes);
116 TrimNodes(nodes);
117
118 // Any control nodes not reachable from start are dead, even loops.
119 for (size_t i = 0; i < nodes.size(); i++) {
120 Node* node = nodes[i];
121 byte reach = fw_reachability[node->id()];
122 if ((reach & kFromStart) == 0 &&
123 IrOpcode::IsControlOpcode(node->opcode())) {
124 ReplaceNode(node, dead()); // uses will be added to revisit queue.
125 }
126 }
127 return TryRevisit(); // try to push a node onto the stack.
128 }
129
130 // Connect {loop}, the header of a non-terminating loop, to the end node.
131 void ConnectNTL(NodeVector& nodes, Node* loop) {
132 TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
133 CHECK_EQ(IrOpcode::kLoop, loop->opcode());
134 Node* to_add = loop;
135 Node* end = graph()->end();
136 CHECK_EQ(IrOpcode::kEnd, end->opcode());
137 Node* merge = end->InputAt(0);
138 if (merge->opcode() != IrOpcode::kMerge) {
139 if (merge->opcode() == IrOpcode::kDead) {
140 // end died; just connect end to the loop.
141 end->ReplaceInput(0, loop);
142 to_add = loop;
143 } else {
144 // Introduce a new merge for the end.
145 merge = graph()->NewNode(common_->Merge(2), merge, loop);
146 state_.resize(graph()->NodeCount(), kUnvisited);
147 end->ReplaceInput(0, merge);
148 to_add = merge;
149 }
150 } else {
151 // Append a new input to the final merge at the end.
152 merge->AppendInput(graph()->zone(), loop);
153 merge->set_op(common_->Merge(merge->InputCount()));
154 }
155 nodes.push_back(to_add);
156 state_[to_add->id()] = kVisited;
157 AddBackwardsReachableNodes(nodes, nodes.size() - 1);
158 }
159
160 void AddNodesReachableFromEnd(NodeVector& nodes) {
161 Node* end = graph()->end();
162 if (!end->IsDead()) {
163 state_[end->id()] = kVisited;
164 nodes.push_back(end);
165 AddBackwardsReachableNodes(nodes, nodes.size() - 1);
166 }
167 }
168
169 void AddBackwardsReachableNodes(NodeVector& nodes, size_t cursor) {
170 while (cursor < nodes.size()) {
171 Node* node = nodes[cursor++];
52 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); 172 for (InputIter i = node->inputs().begin(); i != node->inputs().end();
53 ++i) { 173 ++i) {
54 Recurse(*i); // pushes node onto the stack if necessary. 174 Node* input = *i;
175 if (state_[input->id()] != kVisited) {
176 state_[input->id()] = kVisited;
177 nodes.push_back(input);
178 }
55 } 179 }
56 } 180 }
181 }
182
183 void Trim() {
184 // Gather all nodes backwards-reachable from end through inputs.
185 state_.assign(graph()->NodeCount(), kUnvisited);
186 NodeVector nodes(zone_);
187 AddNodesReachableFromEnd(nodes);
188
57 // Process cached nodes in the JSGraph too. 189 // Process cached nodes in the JSGraph too.
58 jsgraph_->GetCachedNodes(&nodes); 190 jsgraph_->GetCachedNodes(&nodes);
191 TrimNodes(nodes);
192 }
193
194 void TrimNodes(NodeVector& nodes) {
59 // Remove dead->live edges. 195 // Remove dead->live edges.
60 for (size_t j = 0; j < nodes.size(); j++) { 196 for (size_t j = 0; j < nodes.size(); j++) {
61 Node* node = nodes[j]; 197 Node* node = nodes[j];
62 for (UseIter i = node->uses().begin(); i != node->uses().end();) { 198 for (UseIter i = node->uses().begin(); i != node->uses().end();) {
63 size_t id = static_cast<size_t>((*i)->id()); 199 size_t id = static_cast<size_t>((*i)->id());
64 if (state_[id] != kVisited) { 200 if (state_[id] != kVisited) {
65 TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", (*i)->id(), 201 TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", (*i)->id(),
66 (*i)->op()->mnemonic(), i.index(), node->id(), 202 (*i)->op()->mnemonic(), i.index(), node->id(),
67 node->op()->mnemonic())); 203 node->op()->mnemonic()));
68 i.UpdateToAndIncrement(NULL); 204 i.UpdateToAndIncrement(NULL);
(...skipping 11 matching lines...) Expand all
80 CHECK_NE(NULL, *i); 216 CHECK_NE(NULL, *i);
81 } 217 }
82 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { 218 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
83 size_t id = static_cast<size_t>((*i)->id()); 219 size_t id = static_cast<size_t>((*i)->id());
84 CHECK_EQ(kVisited, state_[id]); 220 CHECK_EQ(kVisited, state_[id]);
85 } 221 }
86 } 222 }
87 #endif 223 #endif
88 } 224 }
89 225
226 // Reduce the node on the top of the stack.
227 // If an input {i} is not yet visited or needs to be revisited, push {i} onto
228 // the stack and return. Otherwise, all inputs are visited, so apply
229 // reductions for {node} and pop it off the stack.
230 void ReduceTop() {
231 int height = stack_.size();
232 Node* node = stack_.back();
233
234 if (node->IsDead()) return Pop(); // Node was killed while on stack.
235
236 TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));
237
238 // Recurse on an input if necessary.
239 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
Benedikt Meurer 2014/10/23 10:45:25 for (Node* const input : node->inputs()) if (Recurse(input)) return;
titzer 2014/10/23 11:23:44 Done.
240 if (Recurse(*i)) return;
241 }
242
243 // All inputs should be visited or on stack. Apply reductions to node.
244 Node* replacement = ReduceNode(node);
245 if (replacement != node) ReplaceNode(node, replacement);
246
247 // After reducing the node, pop it off the stack.
248 CHECK_EQ(height, stack_.size());
Benedikt Meurer 2014/10/23 10:45:25 Windows builder will probably complain about this
titzer 2014/10/23 11:23:44 Fixed with static_cast
249 Pop();
250
251 // If there was a replacement, reduce it after popping {node}.
252 if (replacement != node) Recurse(replacement);
253 }
254
90 // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}. 255 // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}.
91 bool Recurse(Node* node) { 256 bool Recurse(Node* node) {
92 size_t id = static_cast<size_t>(node->id()); 257 size_t id = static_cast<size_t>(node->id());
93 if (id < state_.size()) { 258 if (id < state_.size()) {
94 if (state_[id] != kRevisit && state_[id] != kUnvisited) return false; 259 if (state_[id] != kRevisit && state_[id] != kUnvisited) return false;
95 } else { 260 } else {
96 state_.resize((3 * id) / 2, kUnvisited); 261 state_.resize((3 * id) / 2, kUnvisited);
97 } 262 }
98 Push(node); 263 Push(node);
99 return true; 264 return true;
100 } 265 }
101 266
102 void Push(Node* node) { 267 void Push(Node* node) {
103 state_[node->id()] = kOnStack; 268 state_[node->id()] = kOnStack;
104 stack_.push_back(node); 269 stack_.push_back(node);
105 } 270 }
271
272 void Pop() {
273 int pos = stack_.size() - 1;
274 DCHECK_GE(pos, 0);
275 DCHECK_EQ(kOnStack, state_[stack_[pos]->id()]);
276 state_[stack_[pos]->id()] = kVisited;
277 stack_.pop_back();
278 }
279
280 // Queue a node to be revisited if it has been visited once already.
281 void Revisit(Node* node) {
282 size_t id = static_cast<size_t>(node->id());
283 if (id < state_.size() && state_[id] == kVisited) {
284 TRACE((" Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
285 state_[id] = kRevisit;
286 revisit_.push_back(node);
287 }
288 }
289
290 Node* dead() {
291 if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
292 return dead_;
293 }
294
295 //===========================================================================
296 // Reducer implementation: perform reductions on a node.
297 //===========================================================================
298 Node* ReduceNode(Node* node) {
299 if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
300 // If a node has only one control input and it is dead, replace with dead.
301 Node* control = NodeProperties::GetControlInput(node);
302 if (control->opcode() == IrOpcode::kDead) {
303 TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
304 return control;
305 }
306 }
307
308 // Reduce branches, phis, and merges.
309 switch (node->opcode()) {
310 case IrOpcode::kBranch:
311 return ReduceBranch(node);
312 case IrOpcode::kLoop:
313 case IrOpcode::kMerge:
314 return ReduceMerge(node);
315 case IrOpcode::kPhi:
316 case IrOpcode::kEffectPhi:
317 return ReducePhi(node);
318 default:
319 return node;
320 }
321 }
322
323 // Reduce redundant phis.
324 Node* ReducePhi(Node* node) {
325 int n = node->InputCount();
326 DCHECK_EQ(1, OperatorProperties::GetControlInputCount(node->op()));
327 if (node->opcode() == IrOpcode::kPhi) {
328 DCHECK_EQ(n, 1 + OperatorProperties::GetValueInputCount(node->op()));
329 }
330 if (node->opcode() == IrOpcode::kEffectPhi) {
331 DCHECK_EQ(n, 1 + OperatorProperties::GetEffectInputCount(node->op()));
332 }
333 if (n <= 1) return dead(); // No non-control inputs.
334 if (n == 2) return node->InputAt(0); // Only one non-control input.
335
336 Node* replacement = NULL;
337 Node::Inputs inputs = node->inputs();
338 for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
339 Node* input = *it;
340 if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
341 if (input != node && input != replacement) { // non-redundant input.
342 if (replacement != NULL) return node;
343 replacement = input;
344 }
345 }
346 return replacement == NULL ? dead() : replacement;
347 }
348
349 // Reduce merges by trimming away dead inputs from the merge and phis.
350 Node* ReduceMerge(Node* node) {
351 // Count the number of live inputs.
352 int live = 0;
353 int live_index = 0;
354 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
355 if ((*i)->opcode() != IrOpcode::kDead) {
356 live++;
357 live_index = i.index();
358 }
359 }
360
361 if (live > 1 && live == node->InputCount()) return node; // nothing to do.
362
363 TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
364 node->op()->mnemonic(), live));
365
366 if (live == 0) return dead(); // no remaining inputs.
367
368 // Gather phis and effect phis to be edited.
369 ZoneDeque<Node*> phis(zone_);
Benedikt Meurer 2014/10/23 10:45:25 Hm, I'd like to have a ZoneStack here, i.e. a class
titzer 2014/10/23 11:23:44 Order doesn't really matter here, so I could as we
370 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
Benedikt Meurer 2014/10/23 10:45:25 for (Node* const to : node->uses()) { ... }
titzer 2014/10/23 11:23:44 Done.
371 Node* to = *i;
372 if (to->opcode() == IrOpcode::kPhi ||
373 to->opcode() == IrOpcode::kEffectPhi) {
374 phis.push_back(to);
375 }
376 }
377
378 // Remove dead inputs from merge and corresponding phis.
379 while (!phis.empty()) {
380 Node* phi = phis.back();
381 phis.pop_back();
382 TRACE((" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
383 phi->op()->mnemonic(), live));
384 if (live == 1) {
385 Node* result = phi->InputAt(live_index);
386 ReplaceNode(phi, result); // phi is redundant.
387 } else {
388 RemoveDeadInputs(node, phi);
389 Revisit(phi); // re-reduce every phi.
390 }
391 }
392
393 // If only one input is remaining, remove the merge altogether.
394 if (live == 1) return node->InputAt(live_index);
395 RemoveDeadInputs(node, node);
396 return node;
397 }
398
399 // Reduce branches with constant inputs.
400 Node* ReduceBranch(Node* node) {
401 Node* cond = node->InputAt(0);
402 bool is_true;
403 switch (cond->opcode()) {
404 case IrOpcode::kInt32Constant:
405 is_true = !Int32Matcher(cond).Is(0);
406 break;
407 case IrOpcode::kNumberConstant:
408 is_true = !NumberMatcher(cond).Is(0);
409 break;
410 case IrOpcode::kHeapConstant: {
411 Handle<Object> object =
412 HeapObjectMatcher<Object>(cond).Value().handle();
413 if (object->IsTrue())
414 is_true = true;
415 else if (object->IsFalse())
416 is_true = false;
417 else
418 return node; // TODO(turbofan): fold branches on strings, objects.
419 break;
420 }
421 default:
422 return node;
423 }
424
425 TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
426 is_true ? "true" : "false"));
427
428 // Replace IfTrue and IfFalse projections from this branch.
429 Node* control = NodeProperties::GetControlInput(node);
430 for (UseIter i = node->uses().begin(); i != node->uses().end();) {
431 Node* to = *i;
432 if (to->opcode() == IrOpcode::kIfTrue) {
433 TRACE((" IfTrue: #%d:%s\n", to->id(), to->op()->mnemonic()));
434 i.UpdateToAndIncrement(NULL);
435 ReplaceNode(to, is_true ? control : dead());
436 } else if (to->opcode() == IrOpcode::kIfFalse) {
437 TRACE((" IfFalse: #%d:%s\n", to->id(), to->op()->mnemonic()));
438 i.UpdateToAndIncrement(NULL);
439 ReplaceNode(to, is_true ? dead() : control);
440 } else {
441 ++i;
442 }
443 }
444 return control;
445 }
446
447 // Remove inputs to {node} corresponding to the dead inputs to {merge}
448 // and compact the remaining inputs, updating the operator.
449 void RemoveDeadInputs(Node* merge, Node* node) {
450 int pos = 0;
451 for (int i = 0; i < node->InputCount(); i++) {
452 // skip dead inputs.
453 if (i < merge->InputCount() &&
454 merge->InputAt(i)->opcode() == IrOpcode::kDead)
455 continue;
456 // compact live inputs.
457 if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
458 pos++;
459 }
460 node->TrimInputCount(pos);
461 if (node->opcode() == IrOpcode::kPhi) {
462 node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
463 } else if (node->opcode() == IrOpcode::kEffectPhi) {
464 node->set_op(common_->EffectPhi(pos - 1));
465 } else if (node->opcode() == IrOpcode::kMerge) {
466 node->set_op(common_->Merge(pos));
467 } else if (node->opcode() == IrOpcode::kLoop) {
468 node->set_op(common_->Loop(pos));
469 } else {
470 UNREACHABLE();
471 }
472 }
473
474 void ReplaceNode(Node* node, Node* replacement) {
475 if (node == replacement) return;
476 TRACE((" Replace: #%d:%s with #%d:%s\n", node->id(),
477 node->op()->mnemonic(), replacement->id(),
478 replacement->op()->mnemonic()));
479 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
480 // Don't revisit this node if it refers to itself.
481 if (*i != node) Revisit(*i);
482 }
483 node->ReplaceUses(replacement);
484 node->Kill();
485 }
486
487 Graph* graph() { return jsgraph_->graph(); }
106 }; 488 };
107 489
490
108 void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph, 491 void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
109 CommonOperatorBuilder* common) { 492 CommonOperatorBuilder* common) {
110 ControlReducerImpl impl(zone, jsgraph, NULL); 493 ControlReducerImpl impl(zone, jsgraph, common);
111 // Only trim the graph for now. Control reduction can reduce non-terminating 494 // Only trim the graph for now. Control reduction can reduce non-terminating
112 // loops to graphs that are unschedulable at the moment. 495 // loops to graphs that are unschedulable at the moment.
496 impl.Reduce();
113 impl.Trim(); 497 impl.Trim();
114 } 498 }
115 499
116 500
117 void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) { 501 void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
118 ControlReducerImpl impl(zone, jsgraph, NULL); 502 ControlReducerImpl impl(zone, jsgraph, NULL);
119 impl.Trim(); 503 impl.Trim();
120 } 504 }
505
506
507 Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
508 CommonOperatorBuilder* common,
509 Node* node) {
510 Zone zone(jsgraph->graph()->zone()->isolate());
511 ControlReducerImpl impl(&zone, jsgraph, common);
512 return impl.ReducePhi(node);
513 }
514
515
516 Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
517 CommonOperatorBuilder* common,
518 Node* node) {
519 Zone zone(jsgraph->graph()->zone()->isolate());
520 ControlReducerImpl impl(&zone, jsgraph, common);
521 return impl.ReduceMerge(node);
522 }
523
524
525 Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
526 CommonOperatorBuilder* common,
527 Node* node) {
528 Zone zone(jsgraph->graph()->zone()->isolate());
529 ControlReducerImpl impl(&zone, jsgraph, common);
530 return impl.ReduceBranch(node);
531 }
121 } 532 }
122 } 533 }
123 } // namespace v8::internal::compiler 534 } // namespace v8::internal::compiler
OLDNEW
« no previous file with comments | « src/compiler/control-reducer.h ('k') | test/cctest/compiler/test-control-reducer.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698