Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(358)

Side by Side Diff: src/compiler/scheduler.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/compiler/scheduler.h ('k') | src/compiler/simplified-lowering.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/scheduler.h" 5 #include "src/compiler/scheduler.h"
6 6
7 #include "src/compiler/graph.h" 7 #include "src/compiler/graph.h"
8 #include "src/compiler/graph-inl.h" 8 #include "src/compiler/graph-inl.h"
9 #include "src/compiler/node.h" 9 #include "src/compiler/node.h"
10 #include "src/compiler/node-properties.h" 10 #include "src/compiler/node-properties.h"
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
160 NodeVector(NodeVector::allocator_type(zone))); 160 NodeVector(NodeVector::allocator_type(zone)));
161 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); 161 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
162 } 162 }
163 163
164 164
165 void Scheduler::AddPredecessorsForLoopsAndMerges() { 165 void Scheduler::AddPredecessorsForLoopsAndMerges() {
166 for (NodeVectorIter i = loops_and_merges_.begin(); 166 for (NodeVectorIter i = loops_and_merges_.begin();
167 i != loops_and_merges_.end(); ++i) { 167 i != loops_and_merges_.end(); ++i) {
168 Node* merge_or_loop = *i; 168 Node* merge_or_loop = *i;
169 BasicBlock* block = schedule_->block(merge_or_loop); 169 BasicBlock* block = schedule_->block(merge_or_loop);
170 ASSERT(block != NULL); 170 DCHECK(block != NULL);
171 // For all of the merge's control inputs, add a goto at the end to the 171 // For all of the merge's control inputs, add a goto at the end to the
172 // merge's basic block. 172 // merge's basic block.
173 for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) { 173 for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
174 if (NodeProperties::IsBasicBlockBegin(*i)) { 174 if (NodeProperties::IsBasicBlockBegin(*i)) {
175 BasicBlock* predecessor_block = schedule_->block(*j); 175 BasicBlock* predecessor_block = schedule_->block(*j);
176 if ((*j)->opcode() != IrOpcode::kReturn && 176 if ((*j)->opcode() != IrOpcode::kReturn &&
177 (*j)->opcode() != IrOpcode::kDeoptimize) { 177 (*j)->opcode() != IrOpcode::kDeoptimize) {
178 ASSERT(predecessor_block != NULL); 178 DCHECK(predecessor_block != NULL);
179 if (FLAG_trace_turbo_scheduler) { 179 if (FLAG_trace_turbo_scheduler) {
180 IrOpcode::Value opcode = (*i)->opcode(); 180 IrOpcode::Value opcode = (*i)->opcode();
181 PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(), 181 PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
182 IrOpcode::Mnemonic(opcode), predecessor_block->id(), 182 IrOpcode::Mnemonic(opcode), predecessor_block->id(),
183 block->id()); 183 block->id());
184 } 184 }
185 schedule_->AddGoto(predecessor_block, block); 185 schedule_->AddGoto(predecessor_block, block);
186 } 186 }
187 } 187 }
188 } 188 }
189 } 189 }
190 } 190 }
191 191
192 192
193 void Scheduler::AddSuccessorsForCalls() { 193 void Scheduler::AddSuccessorsForCalls() {
194 for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) { 194 for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
195 Node* call = *i; 195 Node* call = *i;
196 ASSERT(call->opcode() == IrOpcode::kCall); 196 DCHECK(call->opcode() == IrOpcode::kCall);
197 ASSERT(NodeProperties::CanLazilyDeoptimize(call)); 197 DCHECK(NodeProperties::CanLazilyDeoptimize(call));
198 198
199 Node* lazy_deopt_node = NULL; 199 Node* lazy_deopt_node = NULL;
200 Node* cont_node = NULL; 200 Node* cont_node = NULL;
201 // Find the continuation and lazy-deopt nodes among the uses. 201 // Find the continuation and lazy-deopt nodes among the uses.
202 for (UseIter use_iter = call->uses().begin(); 202 for (UseIter use_iter = call->uses().begin();
203 use_iter != call->uses().end(); ++use_iter) { 203 use_iter != call->uses().end(); ++use_iter) {
204 switch ((*use_iter)->opcode()) { 204 switch ((*use_iter)->opcode()) {
205 case IrOpcode::kContinuation: { 205 case IrOpcode::kContinuation: {
206 ASSERT(cont_node == NULL); 206 DCHECK(cont_node == NULL);
207 cont_node = *use_iter; 207 cont_node = *use_iter;
208 break; 208 break;
209 } 209 }
210 case IrOpcode::kLazyDeoptimization: { 210 case IrOpcode::kLazyDeoptimization: {
211 ASSERT(lazy_deopt_node == NULL); 211 DCHECK(lazy_deopt_node == NULL);
212 lazy_deopt_node = *use_iter; 212 lazy_deopt_node = *use_iter;
213 break; 213 break;
214 } 214 }
215 default: 215 default:
216 break; 216 break;
217 } 217 }
218 } 218 }
219 ASSERT(lazy_deopt_node != NULL); 219 DCHECK(lazy_deopt_node != NULL);
220 ASSERT(cont_node != NULL); 220 DCHECK(cont_node != NULL);
221 BasicBlock* cont_successor_block = schedule_->block(cont_node); 221 BasicBlock* cont_successor_block = schedule_->block(cont_node);
222 BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node); 222 BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
223 Node* call_block_node = NodeProperties::GetControlInput(call); 223 Node* call_block_node = NodeProperties::GetControlInput(call);
224 BasicBlock* call_block = schedule_->block(call_block_node); 224 BasicBlock* call_block = schedule_->block(call_block_node);
225 if (FLAG_trace_turbo_scheduler) { 225 if (FLAG_trace_turbo_scheduler) {
226 IrOpcode::Value opcode = call->opcode(); 226 IrOpcode::Value opcode = call->opcode();
227 PrintF("node %d (%s) in block %d -> block %d\n", call->id(), 227 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
228 IrOpcode::Mnemonic(opcode), call_block->id(), 228 IrOpcode::Mnemonic(opcode), call_block->id(),
229 cont_successor_block->id()); 229 cont_successor_block->id());
230 PrintF("node %d (%s) in block %d -> block %d\n", call->id(), 230 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
231 IrOpcode::Mnemonic(opcode), call_block->id(), 231 IrOpcode::Mnemonic(opcode), call_block->id(),
232 deopt_successor_block->id()); 232 deopt_successor_block->id());
233 } 233 }
234 schedule_->AddCall(call_block, call, cont_successor_block, 234 schedule_->AddCall(call_block, call, cont_successor_block,
235 deopt_successor_block); 235 deopt_successor_block);
236 } 236 }
237 } 237 }
238 238
239 239
240 void Scheduler::AddSuccessorsForDeopts() { 240 void Scheduler::AddSuccessorsForDeopts() {
241 for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) { 241 for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
242 Node* deopt_block_node = NodeProperties::GetControlInput(*i); 242 Node* deopt_block_node = NodeProperties::GetControlInput(*i);
243 BasicBlock* deopt_block = schedule_->block(deopt_block_node); 243 BasicBlock* deopt_block = schedule_->block(deopt_block_node);
244 ASSERT(deopt_block != NULL); 244 DCHECK(deopt_block != NULL);
245 if (FLAG_trace_turbo_scheduler) { 245 if (FLAG_trace_turbo_scheduler) {
246 IrOpcode::Value opcode = (*i)->opcode(); 246 IrOpcode::Value opcode = (*i)->opcode();
247 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), 247 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
248 IrOpcode::Mnemonic(opcode), deopt_block->id()); 248 IrOpcode::Mnemonic(opcode), deopt_block->id());
249 } 249 }
250 schedule_->AddDeoptimize(deopt_block, *i); 250 schedule_->AddDeoptimize(deopt_block, *i);
251 } 251 }
252 } 252 }
253 253
254 254
255 void Scheduler::AddSuccessorsForBranches() { 255 void Scheduler::AddSuccessorsForBranches() {
256 for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) { 256 for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
257 Node* branch = *i; 257 Node* branch = *i;
258 ASSERT(branch->opcode() == IrOpcode::kBranch); 258 DCHECK(branch->opcode() == IrOpcode::kBranch);
259 Node* branch_block_node = NodeProperties::GetControlInput(branch); 259 Node* branch_block_node = NodeProperties::GetControlInput(branch);
260 BasicBlock* branch_block = schedule_->block(branch_block_node); 260 BasicBlock* branch_block = schedule_->block(branch_block_node);
261 ASSERT(branch_block != NULL); 261 DCHECK(branch_block != NULL);
262 UseIter use_iter = branch->uses().begin(); 262 UseIter use_iter = branch->uses().begin();
263 Node* first_successor = *use_iter; 263 Node* first_successor = *use_iter;
264 ++use_iter; 264 ++use_iter;
265 ASSERT(use_iter != branch->uses().end()); 265 DCHECK(use_iter != branch->uses().end());
266 Node* second_successor = *use_iter; 266 Node* second_successor = *use_iter;
267 ASSERT(++use_iter == branch->uses().end()); 267 DCHECK(++use_iter == branch->uses().end());
268 Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue 268 Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
269 ? first_successor 269 ? first_successor
270 : second_successor; 270 : second_successor;
271 Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue 271 Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
272 ? second_successor 272 ? second_successor
273 : first_successor; 273 : first_successor;
274 ASSERT(true_successor_node->opcode() == IrOpcode::kIfTrue); 274 DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue);
275 ASSERT(false_successor_node->opcode() == IrOpcode::kIfFalse); 275 DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse);
276 BasicBlock* true_successor_block = schedule_->block(true_successor_node); 276 BasicBlock* true_successor_block = schedule_->block(true_successor_node);
277 BasicBlock* false_successor_block = schedule_->block(false_successor_node); 277 BasicBlock* false_successor_block = schedule_->block(false_successor_node);
278 ASSERT(true_successor_block != NULL); 278 DCHECK(true_successor_block != NULL);
279 ASSERT(false_successor_block != NULL); 279 DCHECK(false_successor_block != NULL);
280 if (FLAG_trace_turbo_scheduler) { 280 if (FLAG_trace_turbo_scheduler) {
281 IrOpcode::Value opcode = branch->opcode(); 281 IrOpcode::Value opcode = branch->opcode();
282 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), 282 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
283 IrOpcode::Mnemonic(opcode), branch_block->id(), 283 IrOpcode::Mnemonic(opcode), branch_block->id(),
284 true_successor_block->id()); 284 true_successor_block->id());
285 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), 285 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
286 IrOpcode::Mnemonic(opcode), branch_block->id(), 286 IrOpcode::Mnemonic(opcode), branch_block->id(),
287 false_successor_block->id()); 287 false_successor_block->id());
288 } 288 }
289 schedule_->AddBranch(branch_block, branch, true_successor_block, 289 schedule_->AddBranch(branch_block, branch, true_successor_block,
290 false_successor_block); 290 false_successor_block);
291 } 291 }
292 } 292 }
293 293
294 294
295 void Scheduler::AddSuccessorsForReturns() { 295 void Scheduler::AddSuccessorsForReturns() {
296 for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) { 296 for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
297 Node* return_block_node = NodeProperties::GetControlInput(*i); 297 Node* return_block_node = NodeProperties::GetControlInput(*i);
298 BasicBlock* return_block = schedule_->block(return_block_node); 298 BasicBlock* return_block = schedule_->block(return_block_node);
299 ASSERT(return_block != NULL); 299 DCHECK(return_block != NULL);
300 if (FLAG_trace_turbo_scheduler) { 300 if (FLAG_trace_turbo_scheduler) {
301 IrOpcode::Value opcode = (*i)->opcode(); 301 IrOpcode::Value opcode = (*i)->opcode();
302 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), 302 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
303 IrOpcode::Mnemonic(opcode), return_block->id()); 303 IrOpcode::Mnemonic(opcode), return_block->id());
304 } 304 }
305 schedule_->AddReturn(return_block, *i); 305 schedule_->AddReturn(return_block, *i);
306 } 306 }
307 } 307 }
308 308
309 309
310 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { 310 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
311 while (b1 != b2) { 311 while (b1 != b2) {
312 int b1_rpo = GetRPONumber(b1); 312 int b1_rpo = GetRPONumber(b1);
313 int b2_rpo = GetRPONumber(b2); 313 int b2_rpo = GetRPONumber(b2);
314 ASSERT(b1_rpo != b2_rpo); 314 DCHECK(b1_rpo != b2_rpo);
315 if (b1_rpo < b2_rpo) { 315 if (b1_rpo < b2_rpo) {
316 b2 = schedule_->immediate_dominator_[b2->id()]; 316 b2 = schedule_->immediate_dominator_[b2->id()];
317 } else { 317 } else {
318 b1 = schedule_->immediate_dominator_[b1->id()]; 318 b1 = schedule_->immediate_dominator_[b1->id()];
319 } 319 }
320 } 320 }
321 return b1; 321 return b1;
322 } 322 }
323 323
324 324
325 void Scheduler::GenerateImmediateDominatorTree() { 325 void Scheduler::GenerateImmediateDominatorTree() {
326 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's 326 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
327 // if this becomes really slow. 327 // if this becomes really slow.
328 if (FLAG_trace_turbo_scheduler) { 328 if (FLAG_trace_turbo_scheduler) {
329 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); 329 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
330 } 330 }
331 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { 331 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
332 BasicBlock* current_rpo = schedule_->rpo_order_[i]; 332 BasicBlock* current_rpo = schedule_->rpo_order_[i];
333 if (current_rpo != schedule_->entry()) { 333 if (current_rpo != schedule_->entry()) {
334 BasicBlock::Predecessors::iterator current_pred = 334 BasicBlock::Predecessors::iterator current_pred =
335 current_rpo->predecessors().begin(); 335 current_rpo->predecessors().begin();
336 BasicBlock::Predecessors::iterator end = 336 BasicBlock::Predecessors::iterator end =
337 current_rpo->predecessors().end(); 337 current_rpo->predecessors().end();
338 ASSERT(current_pred != end); 338 DCHECK(current_pred != end);
339 BasicBlock* dominator = *current_pred; 339 BasicBlock* dominator = *current_pred;
340 ++current_pred; 340 ++current_pred;
341 // For multiple predecessors, walk up the rpo ordering until a common 341 // For multiple predecessors, walk up the rpo ordering until a common
342 // dominator is found. 342 // dominator is found.
343 int current_rpo_pos = GetRPONumber(current_rpo); 343 int current_rpo_pos = GetRPONumber(current_rpo);
344 while (current_pred != end) { 344 while (current_pred != end) {
345 // Don't examine backwards edges 345 // Don't examine backwards edges
346 BasicBlock* pred = *current_pred; 346 BasicBlock* pred = *current_pred;
347 if (GetRPONumber(pred) < current_rpo_pos) { 347 if (GetRPONumber(pred) < current_rpo_pos) {
348 dominator = GetCommonDominator(dominator, *current_pred); 348 dominator = GetCommonDominator(dominator, *current_pred);
(...skipping 15 matching lines...) Expand all
364 : has_changed_rpo_constraints_(true), 364 : has_changed_rpo_constraints_(true),
365 scheduler_(scheduler), 365 scheduler_(scheduler),
366 schedule_(scheduler->schedule_) {} 366 schedule_(scheduler->schedule_) {}
367 367
368 GenericGraphVisit::Control Pre(Node* node) { 368 GenericGraphVisit::Control Pre(Node* node) {
369 int id = node->id(); 369 int id = node->id();
370 int max_rpo = 0; 370 int max_rpo = 0;
371 // Fixed nodes already know their schedule early position. 371 // Fixed nodes already know their schedule early position.
372 if (IsFixedNode(node)) { 372 if (IsFixedNode(node)) {
373 BasicBlock* block = schedule_->block(node); 373 BasicBlock* block = schedule_->block(node);
374 ASSERT(block != NULL); 374 DCHECK(block != NULL);
375 max_rpo = block->rpo_number_; 375 max_rpo = block->rpo_number_;
376 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { 376 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
377 has_changed_rpo_constraints_ = true; 377 has_changed_rpo_constraints_ = true;
378 } 378 }
379 scheduler_->schedule_early_rpo_index_[id] = max_rpo; 379 scheduler_->schedule_early_rpo_index_[id] = max_rpo;
380 if (FLAG_trace_turbo_scheduler) { 380 if (FLAG_trace_turbo_scheduler) {
381 PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo); 381 PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
382 } 382 }
383 } 383 }
384 return GenericGraphVisit::CONTINUE; 384 return GenericGraphVisit::CONTINUE;
385 } 385 }
386 386
387 GenericGraphVisit::Control Post(Node* node) { 387 GenericGraphVisit::Control Post(Node* node) {
388 int id = node->id(); 388 int id = node->id();
389 int max_rpo = 0; 389 int max_rpo = 0;
390 // Otherwise, the minimum rpo for the node is the max of all of the inputs. 390 // Otherwise, the minimum rpo for the node is the max of all of the inputs.
391 if (!IsFixedNode(node)) { 391 if (!IsFixedNode(node)) {
392 ASSERT(!NodeProperties::IsBasicBlockBegin(node)); 392 DCHECK(!NodeProperties::IsBasicBlockBegin(node));
393 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); 393 for (InputIter i = node->inputs().begin(); i != node->inputs().end();
394 ++i) { 394 ++i) {
395 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; 395 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
396 if (control_rpo > max_rpo) { 396 if (control_rpo > max_rpo) {
397 max_rpo = control_rpo; 397 max_rpo = control_rpo;
398 } 398 }
399 } 399 }
400 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { 400 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
401 has_changed_rpo_constraints_ = true; 401 has_changed_rpo_constraints_ = true;
402 } 402 }
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
455 NodeProperties::HasFixedSchedulePosition(node)) { 455 NodeProperties::HasFixedSchedulePosition(node)) {
456 if (FLAG_trace_turbo_scheduler) { 456 if (FLAG_trace_turbo_scheduler) {
457 PrintF("Fixed position node %d is unscheduled, scheduling now\n", 457 PrintF("Fixed position node %d is unscheduled, scheduling now\n",
458 node->id()); 458 node->id());
459 } 459 }
460 IrOpcode::Value opcode = node->opcode(); 460 IrOpcode::Value opcode = node->opcode();
461 BasicBlock* block = 461 BasicBlock* block =
462 opcode == IrOpcode::kParameter 462 opcode == IrOpcode::kParameter
463 ? schedule_->entry() 463 ? schedule_->entry()
464 : schedule_->block(NodeProperties::GetControlInput(node)); 464 : schedule_->block(NodeProperties::GetControlInput(node));
465 ASSERT(block != NULL); 465 DCHECK(block != NULL);
466 schedule_->AddNode(block, node); 466 schedule_->AddNode(block, node);
467 } 467 }
468 468
469 if (NodeProperties::IsScheduleRoot(node)) { 469 if (NodeProperties::IsScheduleRoot(node)) {
470 scheduler_->schedule_root_nodes_.push_back(node); 470 scheduler_->schedule_root_nodes_.push_back(node);
471 } 471 }
472 472
473 return GenericGraphVisit::CONTINUE; 473 return GenericGraphVisit::CONTINUE;
474 } 474 }
475 475
476 void PostEdge(Node* from, int index, Node* to) { 476 void PostEdge(Node* from, int index, Node* to) {
477 // If the edge is from an unscheduled node, then tally it in the use count 477 // If the edge is from an unscheduled node, then tally it in the use count
478 // for all of its inputs. The same criterion will be used in ScheduleLate 478 // for all of its inputs. The same criterion will be used in ScheduleLate
479 // for decrementing use counts. 479 // for decrementing use counts.
480 if (!schedule_->IsScheduled(from) && NodeProperties::CanBeScheduled(from)) { 480 if (!schedule_->IsScheduled(from) && NodeProperties::CanBeScheduled(from)) {
481 ASSERT(!NodeProperties::HasFixedSchedulePosition(from)); 481 DCHECK(!NodeProperties::HasFixedSchedulePosition(from));
482 ++scheduler_->unscheduled_uses_[to->id()]; 482 ++scheduler_->unscheduled_uses_[to->id()];
483 if (FLAG_trace_turbo_scheduler) { 483 if (FLAG_trace_turbo_scheduler) {
484 PrintF("Incrementing uses of node %d from %d to %d\n", to->id(), 484 PrintF("Incrementing uses of node %d from %d to %d\n", to->id(),
485 from->id(), scheduler_->unscheduled_uses_[to->id()]); 485 from->id(), scheduler_->unscheduled_uses_[to->id()]);
486 } 486 }
487 } 487 }
488 } 488 }
489 489
490 private: 490 private:
491 Scheduler* scheduler_; 491 Scheduler* scheduler_;
(...skipping 15 matching lines...) Expand all
507 class ScheduleLateNodeVisitor : public NullNodeVisitor { 507 class ScheduleLateNodeVisitor : public NullNodeVisitor {
508 public: 508 public:
509 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) 509 explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
510 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} 510 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
511 511
512 GenericGraphVisit::Control Pre(Node* node) { 512 GenericGraphVisit::Control Pre(Node* node) {
513 // Don't schedule nodes that cannot be scheduled or are already scheduled. 513 // Don't schedule nodes that cannot be scheduled or are already scheduled.
514 if (!NodeProperties::CanBeScheduled(node) || schedule_->IsScheduled(node)) { 514 if (!NodeProperties::CanBeScheduled(node) || schedule_->IsScheduled(node)) {
515 return GenericGraphVisit::CONTINUE; 515 return GenericGraphVisit::CONTINUE;
516 } 516 }
517 ASSERT(!NodeProperties::HasFixedSchedulePosition(node)); 517 DCHECK(!NodeProperties::HasFixedSchedulePosition(node));
518 518
519 // If all the uses of a node have been scheduled, then the node itself can 519 // If all the uses of a node have been scheduled, then the node itself can
520 // be scheduled. 520 // be scheduled.
521 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; 521 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
522 if (FLAG_trace_turbo_scheduler) { 522 if (FLAG_trace_turbo_scheduler) {
523 PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(), 523 PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(),
524 eligible ? "true" : "false"); 524 eligible ? "true" : "false");
525 } 525 }
526 if (!eligible) return GenericGraphVisit::DEFER; 526 if (!eligible) return GenericGraphVisit::DEFER;
527 527
528 // Determine the dominating block for all of the uses of this node. It is 528 // Determine the dominating block for all of the uses of this node. It is
529 // the latest block that this node can be scheduled in. 529 // the latest block that this node can be scheduled in.
530 BasicBlock* block = NULL; 530 BasicBlock* block = NULL;
531 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); 531 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
532 ++i) { 532 ++i) {
533 BasicBlock* use_block = GetBlockForUse(i.edge()); 533 BasicBlock* use_block = GetBlockForUse(i.edge());
534 block = block == NULL ? use_block : use_block == NULL 534 block = block == NULL ? use_block : use_block == NULL
535 ? block 535 ? block
536 : scheduler_->GetCommonDominator( 536 : scheduler_->GetCommonDominator(
537 block, use_block); 537 block, use_block);
538 } 538 }
539 ASSERT(block != NULL); 539 DCHECK(block != NULL);
540 540
541 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; 541 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
542 if (FLAG_trace_turbo_scheduler) { 542 if (FLAG_trace_turbo_scheduler) {
543 PrintF( 543 PrintF(
544 "Schedule late conservative for node %d is block %d at " 544 "Schedule late conservative for node %d is block %d at "
545 "loop depth %d, min rpo = %d\n", 545 "loop depth %d, min rpo = %d\n",
546 node->id(), block->id(), block->loop_depth_, min_rpo); 546 node->id(), block->id(), block->loop_depth_, min_rpo);
547 } 547 }
548 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively 548 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
549 // into enclosing loop pre-headers until they would precede their 549 // into enclosing loop pre-headers until they would precede their
550 // ScheduleEarly position. 550 // ScheduleEarly position.
551 BasicBlock* hoist_block = block; 551 BasicBlock* hoist_block = block;
552 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { 552 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
553 if (hoist_block->loop_depth_ < block->loop_depth_) { 553 if (hoist_block->loop_depth_ < block->loop_depth_) {
554 block = hoist_block; 554 block = hoist_block;
555 if (FLAG_trace_turbo_scheduler) { 555 if (FLAG_trace_turbo_scheduler) {
556 PrintF("Hoisting node %d to block %d\n", node->id(), block->id()); 556 PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
557 } 557 }
558 } 558 }
559 // Try to hoist to the pre-header of the loop header. 559 // Try to hoist to the pre-header of the loop header.
560 hoist_block = hoist_block->loop_header(); 560 hoist_block = hoist_block->loop_header();
561 if (hoist_block != NULL) { 561 if (hoist_block != NULL) {
562 BasicBlock* pre_header = schedule_->dominator(hoist_block); 562 BasicBlock* pre_header = schedule_->dominator(hoist_block);
563 ASSERT(pre_header == NULL || 563 DCHECK(pre_header == NULL ||
564 *hoist_block->predecessors().begin() == pre_header); 564 *hoist_block->predecessors().begin() == pre_header);
565 if (FLAG_trace_turbo_scheduler) { 565 if (FLAG_trace_turbo_scheduler) {
566 PrintF( 566 PrintF(
567 "Try hoist to pre-header block %d of loop header block %d," 567 "Try hoist to pre-header block %d of loop header block %d,"
568 " depth would be %d\n", 568 " depth would be %d\n",
569 pre_header->id(), hoist_block->id(), pre_header->loop_depth_); 569 pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
570 } 570 }
571 hoist_block = pre_header; 571 hoist_block = pre_header;
572 } 572 }
573 } 573 }
574 574
575 ScheduleNode(block, node); 575 ScheduleNode(block, node);
576 576
577 return GenericGraphVisit::CONTINUE; 577 return GenericGraphVisit::CONTINUE;
578 } 578 }
579 579
580 private: 580 private:
581 BasicBlock* GetBlockForUse(Node::Edge edge) { 581 BasicBlock* GetBlockForUse(Node::Edge edge) {
582 Node* use = edge.from(); 582 Node* use = edge.from();
583 IrOpcode::Value opcode = use->opcode(); 583 IrOpcode::Value opcode = use->opcode();
584 // If the use is a phi, forward through the phi to the basic block 584 // If the use is a phi, forward through the phi to the basic block
585 // corresponding to the phi's input. 585 // corresponding to the phi's input.
586 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { 586 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
587 int index = edge.index(); 587 int index = edge.index();
588 if (FLAG_trace_turbo_scheduler) { 588 if (FLAG_trace_turbo_scheduler) {
589 PrintF("Use %d is input %d to a phi\n", use->id(), index); 589 PrintF("Use %d is input %d to a phi\n", use->id(), index);
590 } 590 }
591 use = NodeProperties::GetControlInput(use, 0); 591 use = NodeProperties::GetControlInput(use, 0);
592 opcode = use->opcode(); 592 opcode = use->opcode();
593 ASSERT(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); 593 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
594 use = NodeProperties::GetControlInput(use, index); 594 use = NodeProperties::GetControlInput(use, index);
595 } 595 }
596 BasicBlock* result = schedule_->block(use); 596 BasicBlock* result = schedule_->block(use);
597 if (result == NULL) return NULL; 597 if (result == NULL) return NULL;
598 if (FLAG_trace_turbo_scheduler) { 598 if (FLAG_trace_turbo_scheduler) {
599 PrintF("Must dominate use %d in block %d\n", use->id(), result->id()); 599 PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
600 } 600 }
601 return result; 601 return result;
602 } 602 }
603 603
604 bool IsNodeEligible(Node* node) { 604 bool IsNodeEligible(Node* node) {
605 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; 605 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
606 return eligible; 606 return eligible;
607 } 607 }
608 608
609 void ScheduleNode(BasicBlock* block, Node* node) { 609 void ScheduleNode(BasicBlock* block, Node* node) {
610 schedule_->PlanNode(block, node); 610 schedule_->PlanNode(block, node);
611 scheduler_->scheduled_nodes_[block->id()].push_back(node); 611 scheduler_->scheduled_nodes_[block->id()].push_back(node);
612 612
613 // Reduce the use count of the node's inputs to potentially make them 613 // Reduce the use count of the node's inputs to potentially make them
614 // schedulable. 614 // schedulable.
615 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { 615 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
616 ASSERT(scheduler_->unscheduled_uses_[(*i)->id()] > 0); 616 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
617 --scheduler_->unscheduled_uses_[(*i)->id()]; 617 --scheduler_->unscheduled_uses_[(*i)->id()];
618 if (FLAG_trace_turbo_scheduler) { 618 if (FLAG_trace_turbo_scheduler) {
619 PrintF("Decrementing use count for node %d from node %d (now %d)\n", 619 PrintF("Decrementing use count for node %d from node %d (now %d)\n",
620 (*i)->id(), i.edge().from()->id(), 620 (*i)->id(), i.edge().from()->id(),
621 scheduler_->unscheduled_uses_[(*i)->id()]); 621 scheduler_->unscheduled_uses_[(*i)->id()]);
622 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { 622 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
623 PrintF("node %d is now eligible for scheduling\n", (*i)->id()); 623 PrintF("node %d is now eligible for scheduling\n", (*i)->id());
624 } 624 }
625 } 625 }
626 } 626 }
(...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after
790 if (block->loop_end_ >= 0) { 790 if (block->loop_end_ >= 0) {
791 PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_); 791 PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
792 } 792 }
793 PrintF("\n"); 793 PrintF("\n");
794 } 794 }
795 } 795 }
796 796
797 797
798 static void VerifySpecialRPO(int num_loops, LoopInfo* loops, 798 static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
799 BasicBlockVector* order) { 799 BasicBlockVector* order) {
800 ASSERT(order->size() > 0); 800 DCHECK(order->size() > 0);
801 ASSERT((*order)[0]->id() == 0); // entry should be first. 801 DCHECK((*order)[0]->id() == 0); // entry should be first.
802 802
803 for (int i = 0; i < num_loops; i++) { 803 for (int i = 0; i < num_loops; i++) {
804 LoopInfo* loop = &loops[i]; 804 LoopInfo* loop = &loops[i];
805 BasicBlock* header = loop->header; 805 BasicBlock* header = loop->header;
806 806
807 ASSERT(header != NULL); 807 DCHECK(header != NULL);
808 ASSERT(header->rpo_number_ >= 0); 808 DCHECK(header->rpo_number_ >= 0);
809 ASSERT(header->rpo_number_ < static_cast<int>(order->size())); 809 DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
810 ASSERT(header->loop_end_ >= 0); 810 DCHECK(header->loop_end_ >= 0);
811 ASSERT(header->loop_end_ <= static_cast<int>(order->size())); 811 DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
812 ASSERT(header->loop_end_ > header->rpo_number_); 812 DCHECK(header->loop_end_ > header->rpo_number_);
813 813
814 // Verify the start ... end list relationship. 814 // Verify the start ... end list relationship.
815 int links = 0; 815 int links = 0;
816 BlockList* l = loop->start; 816 BlockList* l = loop->start;
817 ASSERT(l != NULL && l->block == header); 817 DCHECK(l != NULL && l->block == header);
818 bool end_found; 818 bool end_found;
819 while (true) { 819 while (true) {
820 if (l == NULL || l == loop->end) { 820 if (l == NULL || l == loop->end) {
821 end_found = (loop->end == l); 821 end_found = (loop->end == l);
822 break; 822 break;
823 } 823 }
824 // The list should be in same order as the final result. 824 // The list should be in same order as the final result.
825 ASSERT(l->block->rpo_number_ == links + loop->header->rpo_number_); 825 DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
826 links++; 826 links++;
827 l = l->next; 827 l = l->next;
828 ASSERT(links < static_cast<int>(2 * order->size())); // cycle? 828 DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
829 } 829 }
830 ASSERT(links > 0); 830 DCHECK(links > 0);
831 ASSERT(links == (header->loop_end_ - header->rpo_number_)); 831 DCHECK(links == (header->loop_end_ - header->rpo_number_));
832 ASSERT(end_found); 832 DCHECK(end_found);
833 833
834 // Check the contiguousness of loops. 834 // Check the contiguousness of loops.
835 int count = 0; 835 int count = 0;
836 for (int j = 0; j < static_cast<int>(order->size()); j++) { 836 for (int j = 0; j < static_cast<int>(order->size()); j++) {
837 BasicBlock* block = order->at(j); 837 BasicBlock* block = order->at(j);
838 ASSERT(block->rpo_number_ == j); 838 DCHECK(block->rpo_number_ == j);
839 if (j < header->rpo_number_ || j >= header->loop_end_) { 839 if (j < header->rpo_number_ || j >= header->loop_end_) {
840 ASSERT(!loop->members->Contains(block->id())); 840 DCHECK(!loop->members->Contains(block->id()));
841 } else { 841 } else {
842 if (block == header) { 842 if (block == header) {
843 ASSERT(!loop->members->Contains(block->id())); 843 DCHECK(!loop->members->Contains(block->id()));
844 } else { 844 } else {
845 ASSERT(loop->members->Contains(block->id())); 845 DCHECK(loop->members->Contains(block->id()));
846 } 846 }
847 count++; 847 count++;
848 } 848 }
849 } 849 }
850 ASSERT(links == count); 850 DCHECK(links == count);
851 } 851 }
852 } 852 }
853 #endif // DEBUG 853 #endif // DEBUG
854 854
855 855
856 // Compute the special reverse-post-order block ordering, which is essentially 856 // Compute the special reverse-post-order block ordering, which is essentially
857 // a RPO of the graph where loop bodies are contiguous. Properties: 857 // a RPO of the graph where loop bodies are contiguous. Properties:
858 // 1. If block A is a predecessor of B, then A appears before B in the order, 858 // 1. If block A is a predecessor of B, then A appears before B in the order,
859 // unless B is a loop header and A is in the loop headed at B 859 // unless B is a loop header and A is in the loop headed at B
860 // (i.e. A -> B is a backedge). 860 // (i.e. A -> B is a backedge).
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
893 if (succ->rpo_number_ == kBlockOnStack) { 893 if (succ->rpo_number_ == kBlockOnStack) {
894 // The successor is on the stack, so this is a backedge (cycle). 894 // The successor is on the stack, so this is a backedge (cycle).
895 backedges.Add( 895 backedges.Add(
896 std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone_); 896 std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone_);
897 if (succ->loop_end_ < 0) { 897 if (succ->loop_end_ < 0) {
898 // Assign a new loop number to the header if it doesn't have one. 898 // Assign a new loop number to the header if it doesn't have one.
899 succ->loop_end_ = num_loops++; 899 succ->loop_end_ = num_loops++;
900 } 900 }
901 } else { 901 } else {
902 // Push the successor onto the stack. 902 // Push the successor onto the stack.
903 ASSERT(succ->rpo_number_ == kBlockUnvisited1); 903 DCHECK(succ->rpo_number_ == kBlockUnvisited1);
904 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); 904 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
905 } 905 }
906 } else { 906 } else {
907 // Finished with all successors; pop the stack and add the block. 907 // Finished with all successors; pop the stack and add the block.
908 order = order->Add(zone_, frame->block); 908 order = order->Add(zone_, frame->block);
909 frame->block->rpo_number_ = kBlockVisited1; 909 frame->block->rpo_number_ = kBlockVisited1;
910 stack_depth--; 910 stack_depth--;
911 } 911 }
912 } 912 }
913 913
(...skipping 20 matching lines...) Expand all
934 BasicBlock* succ = NULL; 934 BasicBlock* succ = NULL;
935 935
936 if (frame->index < block->SuccessorCount()) { 936 if (frame->index < block->SuccessorCount()) {
937 // Process the next normal successor. 937 // Process the next normal successor.
938 succ = block->SuccessorAt(frame->index++); 938 succ = block->SuccessorAt(frame->index++);
939 } else if (block->IsLoopHeader()) { 939 } else if (block->IsLoopHeader()) {
940 // Process additional outgoing edges from the loop header. 940 // Process additional outgoing edges from the loop header.
941 if (block->rpo_number_ == kBlockOnStack) { 941 if (block->rpo_number_ == kBlockOnStack) {
942 // Finish the loop body the first time the header is left on the 942 // Finish the loop body the first time the header is left on the
943 // stack. 943 // stack.
944 ASSERT(loop != NULL && loop->header == block); 944 DCHECK(loop != NULL && loop->header == block);
945 loop->start = order->Add(zone_, block); 945 loop->start = order->Add(zone_, block);
946 order = loop->end; 946 order = loop->end;
947 block->rpo_number_ = kBlockVisited2; 947 block->rpo_number_ = kBlockVisited2;
948 // Pop the loop stack and continue visiting outgoing edges within the 948 // Pop the loop stack and continue visiting outgoing edges within the
949 // the context of the outer loop, if any. 949 // the context of the outer loop, if any.
950 loop = loop->prev; 950 loop = loop->prev;
951 // We leave the loop header on the stack; the rest of this iteration 951 // We leave the loop header on the stack; the rest of this iteration
952 // and later iterations will go through its outgoing edges list. 952 // and later iterations will go through its outgoing edges list.
953 } 953 }
954 954
955 // Use the next outgoing edge if there are any. 955 // Use the next outgoing edge if there are any.
956 int outgoing_index = frame->index - block->SuccessorCount(); 956 int outgoing_index = frame->index - block->SuccessorCount();
957 LoopInfo* info = &loops[block->loop_end_]; 957 LoopInfo* info = &loops[block->loop_end_];
958 ASSERT(loop != info); 958 DCHECK(loop != info);
959 if (info->outgoing != NULL && 959 if (info->outgoing != NULL &&
960 outgoing_index < info->outgoing->length()) { 960 outgoing_index < info->outgoing->length()) {
961 succ = info->outgoing->at(outgoing_index); 961 succ = info->outgoing->at(outgoing_index);
962 frame->index++; 962 frame->index++;
963 } 963 }
964 } 964 }
965 965
966 if (succ != NULL) { 966 if (succ != NULL) {
967 // Process the next successor. 967 // Process the next successor.
968 if (succ->rpo_number_ == kBlockOnStack) continue; 968 if (succ->rpo_number_ == kBlockOnStack) continue;
969 if (succ->rpo_number_ == kBlockVisited2) continue; 969 if (succ->rpo_number_ == kBlockVisited2) continue;
970 ASSERT(succ->rpo_number_ == kBlockUnvisited2); 970 DCHECK(succ->rpo_number_ == kBlockUnvisited2);
971 if (loop != NULL && !loop->members->Contains(succ->id())) { 971 if (loop != NULL && !loop->members->Contains(succ->id())) {
972 // The successor is not in the current loop or any nested loop. 972 // The successor is not in the current loop or any nested loop.
973 // Add it to the outgoing edges of this loop and visit it later. 973 // Add it to the outgoing edges of this loop and visit it later.
974 loop->AddOutgoing(zone_, succ); 974 loop->AddOutgoing(zone_, succ);
975 } else { 975 } else {
976 // Push the successor onto the stack. 976 // Push the successor onto the stack.
977 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); 977 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
978 if (succ->IsLoopHeader()) { 978 if (succ->IsLoopHeader()) {
979 // Push the inner loop onto the loop stack. 979 // Push the inner loop onto the loop stack.
980 ASSERT(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops); 980 DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
981 LoopInfo* next = &loops[succ->loop_end_]; 981 LoopInfo* next = &loops[succ->loop_end_];
982 next->end = order; 982 next->end = order;
983 next->prev = loop; 983 next->prev = loop;
984 loop = next; 984 loop = next;
985 } 985 }
986 } 986 }
987 } else { 987 } else {
988 // Finished with all successors of the current block. 988 // Finished with all successors of the current block.
989 if (block->IsLoopHeader()) { 989 if (block->IsLoopHeader()) {
990 // If we are going to pop a loop header, then add its entire body. 990 // If we are going to pop a loop header, then add its entire body.
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1027 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) 1027 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
1028 : end->block->rpo_number_; 1028 : end->block->rpo_number_;
1029 current_header = current_loop->header; 1029 current_header = current_loop->header;
1030 if (FLAG_trace_turbo_scheduler) { 1030 if (FLAG_trace_turbo_scheduler) {
1031 PrintF("Block %d is a loop header, increment loop depth to %d\n", 1031 PrintF("Block %d is a loop header, increment loop depth to %d\n",
1032 current->id(), loop_depth); 1032 current->id(), loop_depth);
1033 } 1033 }
1034 } else { 1034 } else {
1035 while (current_header != NULL && 1035 while (current_header != NULL &&
1036 current->rpo_number_ >= current_header->loop_end_) { 1036 current->rpo_number_ >= current_header->loop_end_) {
1037 ASSERT(current_header->IsLoopHeader()); 1037 DCHECK(current_header->IsLoopHeader());
1038 ASSERT(current_loop != NULL); 1038 DCHECK(current_loop != NULL);
1039 current_loop = current_loop->prev; 1039 current_loop = current_loop->prev;
1040 current_header = current_loop == NULL ? NULL : current_loop->header; 1040 current_header = current_loop == NULL ? NULL : current_loop->header;
1041 --loop_depth; 1041 --loop_depth;
1042 } 1042 }
1043 } 1043 }
1044 current->loop_depth_ = loop_depth; 1044 current->loop_depth_ = loop_depth;
1045 if (FLAG_trace_turbo_scheduler) { 1045 if (FLAG_trace_turbo_scheduler) {
1046 if (current->loop_header_ == NULL) { 1046 if (current->loop_header_ == NULL) {
1047 PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(), 1047 PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(),
1048 current->loop_depth_); 1048 current->loop_depth_);
1049 } else { 1049 } else {
1050 PrintF("Block %d's loop header is block %d, loop depth %d\n", 1050 PrintF("Block %d's loop header is block %d, loop depth %d\n",
1051 current->id(), current->loop_header_->id(), 1051 current->id(), current->loop_header_->id(),
1052 current->loop_depth_); 1052 current->loop_depth_);
1053 } 1053 }
1054 } 1054 }
1055 } 1055 }
1056 1056
1057 #if DEBUG 1057 #if DEBUG
1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); 1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
1059 VerifySpecialRPO(num_loops, loops, final_order); 1059 VerifySpecialRPO(num_loops, loops, final_order);
1060 #endif 1060 #endif
1061 return final_order; 1061 return final_order;
1062 } 1062 }
1063 } 1063 }
1064 } 1064 }
1065 } // namespace v8::internal::compiler 1065 } // namespace v8::internal::compiler
OLDNEW
« no previous file with comments | « src/compiler/scheduler.h ('k') | src/compiler/simplified-lowering.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698