Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(315)

Side by Side Diff: src/compiler/control-reducer.cc

Issue 665223006: Implement control reducer, which reduces branches and phis together in a single fixpoint. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/compiler/control-reducer.h ('k') | src/compiler/operator-properties-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/common-operator.h" 5 #include "src/compiler/common-operator.h"
6 #include "src/compiler/control-reducer.h" 6 #include "src/compiler/control-reducer.h"
7 #include "src/compiler/graph.h" 7 #include "src/compiler/graph.h"
8 #include "src/compiler/js-graph.h" 8 #include "src/compiler/js-graph.h"
9 #include "src/compiler/node-matchers.h" 9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h" 10 #include "src/compiler/node-properties-inl.h"
11 #include "src/zone-containers.h" 11 #include "src/zone-containers.h"
12 12
13 namespace v8 { 13 namespace v8 {
14 namespace internal { 14 namespace internal {
15 namespace compiler { 15 namespace compiler {
16 16
17 enum VisitState { kUnvisited, kOnStack, kRevisit, kVisited }; 17 enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
18 enum Reachability { kFromStart = 8 };
18 19
19 #define TRACE(x) \ 20 #define TRACE(x) \
20 if (FLAG_trace_turbo) PrintF x 21 if (FLAG_trace_turbo) PrintF x
21 22
// Implements control reduction over a TurboFan graph: reduces branches,
// merges, and phis together to a fixpoint, and can trim away nodes that are
// not backwards-reachable from the end node.
class ControlReducerImpl {
 public:
  ControlReducerImpl(Zone* zone, JSGraph* jsgraph,
                     CommonOperatorBuilder* common)
      : zone_(zone),
        jsgraph_(jsgraph),
        common_(common),
        state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_),
        stack_(zone_),
        revisit_(zone_),
        dead_(NULL) {}

  Zone* zone_;                     // Zone for temporary allocations.
  JSGraph* jsgraph_;               // Graph (and cached nodes) being reduced.
  CommonOperatorBuilder* common_;  // Builder for common operators.
  ZoneVector<VisitState> state_;   // Per-node visit state, indexed by node id.
  ZoneDeque<Node*> stack_;         // DFS work stack of nodes being processed.
  ZoneDeque<Node*> revisit_;       // Nodes queued for another reduction pass.
  Node* dead_;                     // Lazily-created singleton Dead node.
41 42
42 void Trim() { 43 void Reduce() {
43 // Mark all nodes reachable from end. 44 Push(graph()->end());
44 NodeVector nodes(zone_); 45 do {
45 state_.assign(jsgraph_->graph()->NodeCount(), kUnvisited); 46 // Process the node on the top of the stack, potentially pushing more
46 Push(jsgraph_->graph()->end()); 47 // or popping the node off the stack.
47 while (!stack_.empty()) { 48 ReduceTop();
48 Node* node = stack_[stack_.size() - 1]; 49 // If the stack becomes empty, revisit any nodes in the revisit queue.
49 stack_.pop_back(); 50 // If no nodes in the revisit queue, try removing dead loops.
50 state_[node->id()] = kVisited; 51 // If no dead loops, then finish.
51 nodes.push_back(node); 52 } while (!stack_.empty() || TryRevisit() || RepairAndRemoveLoops());
52 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); 53 }
53 ++i) { 54
54 Recurse(*i); // pushes node onto the stack if necessary. 55 bool TryRevisit() {
56 while (!revisit_.empty()) {
57 Node* n = revisit_.back();
58 revisit_.pop_back();
59 if (state_[n->id()] == kRevisit) { // state can change while in queue.
60 Push(n);
61 return true;
55 } 62 }
56 } 63 }
64 return false;
65 }
66
  // Repair the graph after the possible creation of non-terminating or dead
  // loops. Removing dead loops can produce more opportunities for reduction.
  // Returns true if new work was pushed onto the stack.
  bool RepairAndRemoveLoops() {
    // TODO(turbofan): we can skip this if the graph has no loops, but
    // we have to be careful about proper loop detection during reduction.

    // Gather all nodes backwards-reachable from end (through inputs).
    state_.assign(graph()->NodeCount(), kUnvisited);
    NodeVector nodes(zone_);
    AddNodesReachableFromEnd(nodes);

    // Walk forward through control nodes, looking for back edges to nodes
    // that are not connected to end. Those are non-terminating loops (NTLs).
    Node* start = graph()->start();
    ZoneVector<byte> fw_reachability(graph()->NodeCount(), 0, zone_);
    fw_reachability[start->id()] = kFromStart | kOnStack;
    stack_.push_back(start);

    while (!stack_.empty()) {
      Node* node = stack_.back();
      TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
      bool pop = true;
      for (Node* const succ : node->uses()) {
        byte reach = fw_reachability[succ->id()];
        if ((reach & kOnStack) != 0 && state_[succ->id()] != kVisited) {
          // {succ} is on stack and not reachable from end: a back edge into
          // a non-terminating loop. Connect it to end.
          ConnectNTL(nodes, succ);
          // ConnectNTL may have created new nodes; grow the side table so
          // later indexing by node id stays in bounds.
          fw_reachability.resize(graph()->NodeCount(), 0);
          pop = false;  // continue traversing inputs to this node.
          break;
        }
        if ((reach & kFromStart) == 0 &&
            IrOpcode::IsControlOpcode(succ->opcode())) {
          // {succ} is a control node and not yet reached from start.
          fw_reachability[succ->id()] |= kFromStart | kOnStack;
          stack_.push_back(succ);
          pop = false;  // "recurse" into successor control node.
          break;
        }
      }
      if (pop) {
        fw_reachability[node->id()] &= ~kOnStack;
        stack_.pop_back();
      }
    }

    // Trim references from dead nodes to live nodes first.
    jsgraph_->GetCachedNodes(&nodes);
    TrimNodes(nodes);

    // Any control nodes not reachable from start are dead, even loops.
    for (size_t i = 0; i < nodes.size(); i++) {
      Node* node = nodes[i];
      byte reach = fw_reachability[node->id()];
      if ((reach & kFromStart) == 0 &&
          IrOpcode::IsControlOpcode(node->opcode())) {
        ReplaceNode(node, dead());  // uses will be added to revisit queue.
      }
    }
    return TryRevisit();  // try to push a node onto the stack.
  }
128
  // Connect {loop}, the header of a non-terminating loop, to the end node,
  // so the loop (and everything it reaches backwards) is kept alive.
  void ConnectNTL(NodeVector& nodes, Node* loop) {
    TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));

    if (loop->opcode() != IrOpcode::kTerminate) {
      // Insert a {Terminate} node if the loop has effects.
      ZoneDeque<Node*> effects(zone_);
      for (Node* const use : loop->uses()) {
        if (use->opcode() == IrOpcode::kEffectPhi) effects.push_back(use);
      }
      int count = static_cast<int>(effects.size());
      if (count > 0) {
        // Terminate takes the loop's effect phis as effect inputs and the
        // loop itself as the trailing control input.
        Node** inputs = zone_->NewArray<Node*>(1 + count);
        for (int i = 0; i < count; i++) inputs[i] = effects[i];
        inputs[count] = loop;
        loop = graph()->NewNode(common_->Terminate(count), 1 + count, inputs);
        TRACE(("AddTerminate: #%d:%s[%d]\n", loop->id(), loop->op()->mnemonic(),
               count));
      }
    }

    Node* to_add = loop;
    Node* end = graph()->end();
    CHECK_EQ(IrOpcode::kEnd, end->opcode());
    Node* merge = end->InputAt(0);
    if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
      // The end node died; just connect end to {loop}.
      end->ReplaceInput(0, loop);
    } else if (merge->opcode() != IrOpcode::kMerge) {
      // Introduce a final merge node for {end->InputAt(0)} and {loop}.
      merge = graph()->NewNode(common_->Merge(2), merge, loop);
      end->ReplaceInput(0, merge);
      to_add = merge;
    } else {
      // Append a new input to the final merge at the end.
      merge->AppendInput(graph()->zone(), loop);
      merge->set_op(common_->Merge(merge->InputCount()));
    }
    // Mark the newly attached node, and everything backwards-reachable from
    // it, as live so trimming does not delete the loop we just rescued.
    nodes.push_back(to_add);
    state_.resize(graph()->NodeCount(), kUnvisited);
    state_[to_add->id()] = kVisited;
    AddBackwardsReachableNodes(nodes, nodes.size() - 1);
  }
172
173 void AddNodesReachableFromEnd(NodeVector& nodes) {
174 Node* end = graph()->end();
175 state_[end->id()] = kVisited;
176 if (!end->IsDead()) {
177 nodes.push_back(end);
178 AddBackwardsReachableNodes(nodes, nodes.size() - 1);
179 }
180 }
181
182 void AddBackwardsReachableNodes(NodeVector& nodes, size_t cursor) {
183 while (cursor < nodes.size()) {
184 Node* node = nodes[cursor++];
185 for (Node* const input : node->inputs()) {
186 if (state_[input->id()] != kVisited) {
187 state_[input->id()] = kVisited;
188 nodes.push_back(input);
189 }
190 }
191 }
192 }
193
194 void Trim() {
195 // Gather all nodes backwards-reachable from end through inputs.
196 state_.assign(graph()->NodeCount(), kUnvisited);
197 NodeVector nodes(zone_);
198 AddNodesReachableFromEnd(nodes);
199
57 // Process cached nodes in the JSGraph too. 200 // Process cached nodes in the JSGraph too.
58 jsgraph_->GetCachedNodes(&nodes); 201 jsgraph_->GetCachedNodes(&nodes);
202 TrimNodes(nodes);
203 }
204
  // Disconnect every use of a node in {nodes} that comes from a node not
  // marked kVisited (i.e. a dead node), replacing the dead->live edge with
  // NULL. {nodes} must contain exactly the live (kVisited) nodes.
  void TrimNodes(NodeVector& nodes) {
    // Remove dead->live edges.
    for (size_t j = 0; j < nodes.size(); j++) {
      Node* node = nodes[j];
      for (UseIter i = node->uses().begin(); i != node->uses().end();) {
        size_t id = static_cast<size_t>((*i)->id());
        if (state_[id] != kVisited) {
          TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", (*i)->id(),
                 (*i)->op()->mnemonic(), i.index(), node->id(),
                 node->op()->mnemonic()));
          // UpdateToAndIncrement severs the edge and advances the iterator
          // in one step, keeping it valid while the use list is mutated.
          i.UpdateToAndIncrement(NULL);
        } else {
          ++i;
        }
      }
    }
#if DEBUG
    // Verify that no inputs to live nodes are NULL.
    for (size_t j = 0; j < nodes.size(); j++) {
      Node* node = nodes[j];
      for (Node* const input : node->inputs()) {
        CHECK_NE(NULL, input);
      }
      // Every remaining use of a live node must itself be live.
      for (Node* const use : node->uses()) {
        size_t id = static_cast<size_t>(use->id());
        CHECK_EQ(kVisited, state_[id]);
      }
    }
#endif
  }
89 235
  // Reduce the node on the top of the stack.
  // If an input {i} is not yet visited or needs to be revisited, push {i} onto
  // the stack and return. Otherwise, all inputs are visited, so apply
  // reductions for {node} and pop it off the stack.
  void ReduceTop() {
    size_t height = stack_.size();
    Node* node = stack_.back();

    if (node->IsDead()) return Pop();  // Node was killed while on stack.

    TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));

    // Recurse on an input if necessary.
    for (Node* const input : node->inputs()) {
      if (Recurse(input)) return;
    }

    // All inputs should be visited or on stack. Apply reductions to node.
    Node* replacement = ReduceNode(node);
    if (replacement != node) ReplaceNode(node, replacement);

    // After reducing the node, pop it off the stack. The CHECK enforces
    // that reductions did not themselves push or pop stack entries.
    CHECK_EQ(static_cast<int>(height), static_cast<int>(stack_.size()));
    Pop();

    // If there was a replacement, reduce it after popping {node}.
    if (replacement != node) Recurse(replacement);
  }
264
90 // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}. 265 // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}.
91 bool Recurse(Node* node) { 266 bool Recurse(Node* node) {
92 size_t id = static_cast<size_t>(node->id()); 267 size_t id = static_cast<size_t>(node->id());
93 if (id < state_.size()) { 268 if (id < state_.size()) {
94 if (state_[id] != kRevisit && state_[id] != kUnvisited) return false; 269 if (state_[id] != kRevisit && state_[id] != kUnvisited) return false;
95 } else { 270 } else {
96 state_.resize((3 * id) / 2, kUnvisited); 271 state_.resize((3 * id) / 2, kUnvisited);
97 } 272 }
98 Push(node); 273 Push(node);
99 return true; 274 return true;
100 } 275 }
101 276
102 void Push(Node* node) { 277 void Push(Node* node) {
103 state_[node->id()] = kOnStack; 278 state_[node->id()] = kOnStack;
104 stack_.push_back(node); 279 stack_.push_back(node);
105 } 280 }
281
282 void Pop() {
283 int pos = static_cast<int>(stack_.size()) - 1;
284 DCHECK_GE(pos, 0);
285 DCHECK_EQ(kOnStack, state_[stack_[pos]->id()]);
286 state_[stack_[pos]->id()] = kVisited;
287 stack_.pop_back();
288 }
289
290 // Queue a node to be revisited if it has been visited once already.
291 void Revisit(Node* node) {
292 size_t id = static_cast<size_t>(node->id());
293 if (id < state_.size() && state_[id] == kVisited) {
294 TRACE((" Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
295 state_[id] = kRevisit;
296 revisit_.push_back(node);
297 }
298 }
299
300 Node* dead() {
301 if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
302 return dead_;
303 }
304
305 //===========================================================================
306 // Reducer implementation: perform reductions on a node.
307 //===========================================================================
308 Node* ReduceNode(Node* node) {
309 if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
310 // If a node has only one control input and it is dead, replace with dead.
311 Node* control = NodeProperties::GetControlInput(node);
312 if (control->opcode() == IrOpcode::kDead) {
313 TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
314 return control;
315 }
316 }
317
318 // Reduce branches, phis, and merges.
319 switch (node->opcode()) {
320 case IrOpcode::kBranch:
321 return ReduceBranch(node);
322 case IrOpcode::kLoop:
323 case IrOpcode::kMerge:
324 return ReduceMerge(node);
325 case IrOpcode::kPhi:
326 case IrOpcode::kEffectPhi:
327 return ReducePhi(node);
328 default:
329 return node;
330 }
331 }
332
333 // Reduce redundant phis.
334 Node* ReducePhi(Node* node) {
335 int n = node->InputCount();
336 if (n <= 1) return dead(); // No non-control inputs.
337 if (n == 2) return node->InputAt(0); // Only one non-control input.
338
339 Node* replacement = NULL;
340 Node::Inputs inputs = node->inputs();
341 for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
342 Node* input = *it;
343 if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
344 if (input != node && input != replacement) { // non-redundant input.
345 if (replacement != NULL) return node;
346 replacement = input;
347 }
348 }
349 return replacement == NULL ? dead() : replacement;
350 }
351
  // Reduce merges by trimming away dead inputs from the merge and phis.
  Node* ReduceMerge(Node* node) {
    // Count the number of live inputs.
    int live = 0;
    int index = 0;
    int live_index = 0;  // index of the last live input (used if live == 1).
    for (Node* const input : node->inputs()) {
      if (input->opcode() != IrOpcode::kDead) {
        live++;
        live_index = index;
      }
      index++;
    }

    if (live > 1 && live == node->InputCount()) return node;  // nothing to do.

    TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
           node->op()->mnemonic(), live));

    if (live == 0) return dead();  // no remaining inputs.

    // Gather phis and effect phis to be edited.
    ZoneVector<Node*> phis(zone_);
    for (Node* const use : node->uses()) {
      if (use->opcode() == IrOpcode::kPhi ||
          use->opcode() == IrOpcode::kEffectPhi) {
        phis.push_back(use);
      }
    }

    if (live == 1) {
      // All phis are redundant. Replace them with their live input.
      for (Node* const phi : phis) ReplaceNode(phi, phi->InputAt(live_index));
      // The merge itself is redundant.
      return node->InputAt(live_index);
    }

    // Edit phis in place, removing dead inputs and revisiting them.
    for (Node* const phi : phis) {
      TRACE((" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
             phi->op()->mnemonic(), live));
      RemoveDeadInputs(node, phi);
      Revisit(phi);
    }
    // Edit the merge in place, removing dead inputs.
    RemoveDeadInputs(node, node);
    return node;
  }
400
  // Reduce branches if they have constant inputs: the taken projection
  // becomes the branch's control input, the other projection becomes Dead.
  Node* ReduceBranch(Node* node) {
    Node* cond = node->InputAt(0);
    bool is_true;
    switch (cond->opcode()) {
      case IrOpcode::kInt32Constant:
        is_true = !Int32Matcher(cond).Is(0);
        break;
      case IrOpcode::kNumberConstant:
        is_true = !NumberMatcher(cond).Is(0);
        break;
      case IrOpcode::kHeapConstant: {
        Handle<Object> object =
            HeapObjectMatcher<Object>(cond).Value().handle();
        if (object->IsTrue())
          is_true = true;
        else if (object->IsFalse())
          is_true = false;
        else
          return node;  // TODO(turbofan): fold branches on strings, objects.
        break;
      }
      default:
        // Non-constant condition: nothing to fold.
        return node;
    }

    TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
           is_true ? "true" : "false"));

    // Replace IfTrue and IfFalse projections from this branch.
    Node* control = NodeProperties::GetControlInput(node);
    for (UseIter i = node->uses().begin(); i != node->uses().end();) {
      Node* to = *i;
      if (to->opcode() == IrOpcode::kIfTrue) {
        TRACE((" IfTrue: #%d:%s\n", to->id(), to->op()->mnemonic()));
        // Sever this edge first (keeping the iterator valid) so ReplaceNode
        // does not walk over the use being removed.
        i.UpdateToAndIncrement(NULL);
        ReplaceNode(to, is_true ? control : dead());
      } else if (to->opcode() == IrOpcode::kIfFalse) {
        TRACE((" IfFalse: #%d:%s\n", to->id(), to->op()->mnemonic()));
        i.UpdateToAndIncrement(NULL);
        ReplaceNode(to, is_true ? dead() : control);
      } else {
        ++i;
      }
    }
    return control;
  }
448
  // Remove inputs to {node} corresponding to the dead inputs to {merge}
  // and compact the remaining inputs, updating the operator.
  // {node} is the merge itself or a (effect) phi hanging off it; a phi has
  // one extra trailing control input beyond the merge's input count, which
  // the bounds check below always preserves.
  void RemoveDeadInputs(Node* merge, Node* node) {
    int pos = 0;
    for (int i = 0; i < node->InputCount(); i++) {
      // skip dead inputs.
      if (i < merge->InputCount() &&
          merge->InputAt(i)->opcode() == IrOpcode::kDead)
        continue;
      // compact live inputs.
      if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
      pos++;
    }
    node->TrimInputCount(pos);
    // Keep the operator's declared input count in sync; phis have one fewer
    // value input than total inputs (the trailing control input).
    if (node->opcode() == IrOpcode::kPhi) {
      node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
    } else if (node->opcode() == IrOpcode::kEffectPhi) {
      node->set_op(common_->EffectPhi(pos - 1));
    } else if (node->opcode() == IrOpcode::kMerge) {
      node->set_op(common_->Merge(pos));
    } else if (node->opcode() == IrOpcode::kLoop) {
      node->set_op(common_->Loop(pos));
    } else {
      UNREACHABLE();
    }
  }
475
476 // Replace uses of {node} with {replacement} and revisit the uses.
477 void ReplaceNode(Node* node, Node* replacement) {
478 if (node == replacement) return;
479 TRACE((" Replace: #%d:%s with #%d:%s\n", node->id(),
480 node->op()->mnemonic(), replacement->id(),
481 replacement->op()->mnemonic()));
482 for (Node* const use : node->uses()) {
483 // Don't revisit this node if it refers to itself.
484 if (use != node) Revisit(use);
485 }
486 node->ReplaceUses(replacement);
487 node->Kill();
488 }
489
  // Convenience accessor for the underlying graph.
  Graph* graph() { return jsgraph_->graph(); }
106 }; 491 };
107 492
493
108 void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph, 494 void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
109 CommonOperatorBuilder* common) { 495 CommonOperatorBuilder* common) {
110 ControlReducerImpl impl(zone, jsgraph, NULL); 496 ControlReducerImpl impl(zone, jsgraph, common);
111 // Only trim the graph for now. Control reduction can reduce non-terminating 497 impl.Reduce();
112 // loops to graphs that are unschedulable at the moment.
113 impl.Trim(); 498 impl.Trim();
114 } 499 }
115 500
116 501
117 void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) { 502 void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
118 ControlReducerImpl impl(zone, jsgraph, NULL); 503 ControlReducerImpl impl(zone, jsgraph, NULL);
119 impl.Trim(); 504 impl.Trim();
120 } 505 }
506
507
508 Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
509 CommonOperatorBuilder* common,
510 Node* node) {
511 Zone zone(jsgraph->graph()->zone()->isolate());
512 ControlReducerImpl impl(&zone, jsgraph, common);
513 return impl.ReducePhi(node);
514 }
515
516
517 Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
518 CommonOperatorBuilder* common,
519 Node* node) {
520 Zone zone(jsgraph->graph()->zone()->isolate());
521 ControlReducerImpl impl(&zone, jsgraph, common);
522 return impl.ReduceMerge(node);
523 }
524
525
526 Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
527 CommonOperatorBuilder* common,
528 Node* node) {
529 Zone zone(jsgraph->graph()->zone()->isolate());
530 ControlReducerImpl impl(&zone, jsgraph, common);
531 return impl.ReduceBranch(node);
532 }
121 } 533 }
122 } 534 }
123 } // namespace v8::internal::compiler 535 } // namespace v8::internal::compiler
OLDNEW
« no previous file with comments | « src/compiler/control-reducer.h ('k') | src/compiler/operator-properties-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698