Index: src/compiler/instruction-selector.cc
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index f34c815aba4737090c4b0c54504f4ecf1d8ab333..651f9ac44dc6f7bca22a5a35333bdaa0509a568d 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -30,8 +30,8 @@ void InstructionSelector::SelectInstructions() {
for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
BasicBlock* block = *i;
if (!block->IsLoopHeader()) continue;
- ASSERT_NE(0, block->PredecessorCount());
- ASSERT_NE(1, block->PredecessorCount());
+ DCHECK_NE(0, block->PredecessorCount());
+ DCHECK_NE(1, block->PredecessorCount());
for (BasicBlock::const_iterator j = block->begin(); j != block->end();
++j) {
Node* phi = *j;
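
The DCHECK_* family used above is the debug-only flavor of check macro: active in debug builds, compiled away in release builds. A minimal standalone sketch of that pattern, under the assumption of a simplified setup (illustrative MY_DCHECK* names, not V8's actual base/logging.h implementation):

#include <cstdio>
#include <cstdlib>

// Sketch of a DCHECK-style macro family: checks fire only in debug
// builds and expand to a no-op in release builds.
#ifdef DEBUG
#define MY_DCHECK(condition)                                        \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
#define MY_DCHECK(condition) ((void)0)
#endif

// Comparison variants are typically thin wrappers over the same skeleton.
#define MY_DCHECK_NE(a, b) MY_DCHECK((a) != (b))
#define MY_DCHECK_EQ(a, b) MY_DCHECK((a) == (b))

int main() {
  int predecessor_count = 2;
  // Mirrors the loop-header invariant checked in the hunk above:
  // a loop header must have more than one predecessor.
  MY_DCHECK_NE(0, predecessor_count);
  MY_DCHECK_NE(1, predecessor_count);
  return 0;
}
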
@@ -151,19 +151,19 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
bool InstructionSelector::IsDefined(Node* node) const {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
NodeId id = node->id();
- ASSERT(id >= 0);
- ASSERT(id < static_cast<NodeId>(defined_.size()));
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(defined_.size()));
return defined_[id];
}
void InstructionSelector::MarkAsDefined(Node* node) {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
NodeId id = node->id();
- ASSERT(id >= 0);
- ASSERT(id < static_cast<NodeId>(defined_.size()));
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(defined_.size()));
defined_[id] = true;
}
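
IsDefined/MarkAsDefined above read and set a per-node flag stored in a vector indexed by NodeId, with the checks guarding the index range. A minimal standalone sketch of that pattern, using assert() in place of DCHECK and a hypothetical NodeFlagTable type:

#include <cassert>
#include <cstddef>
#include <vector>

using NodeId = int;

// Sketch of a NodeId-indexed flag table like defined_/used_ in the
// selector: Set() marks a node, Get() queries it, and every access is
// guarded by debug-only bounds checks.
class NodeFlagTable {
 public:
  explicit NodeFlagTable(size_t node_count) : flags_(node_count, false) {}

  bool Get(NodeId id) const {
    assert(id >= 0);
    assert(id < static_cast<NodeId>(flags_.size()));
    return flags_[id];
  }

  void Set(NodeId id) {
    assert(id >= 0);
    assert(id < static_cast<NodeId>(flags_.size()));
    flags_[id] = true;
  }

 private:
  std::vector<bool> flags_;
};
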
@@ -171,30 +171,30 @@ void InstructionSelector::MarkAsDefined(Node* node) {
bool InstructionSelector::IsUsed(Node* node) const {
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
NodeId id = node->id();
- ASSERT(id >= 0);
- ASSERT(id < static_cast<NodeId>(used_.size()));
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(used_.size()));
return used_[id];
}
void InstructionSelector::MarkAsUsed(Node* node) {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
NodeId id = node->id();
- ASSERT(id >= 0);
- ASSERT(id < static_cast<NodeId>(used_.size()));
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(used_.size()));
used_[id] = true;
}
bool InstructionSelector::IsDouble(const Node* node) const {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
return sequence()->IsDouble(node->id());
}
void InstructionSelector::MarkAsDouble(Node* node) {
- ASSERT_NOT_NULL(node);
- ASSERT(!IsReference(node));
+ DCHECK_NOT_NULL(node);
+ DCHECK(!IsReference(node));
sequence()->MarkAsDouble(node->id());
// Propagate "doubleness" throughout phis.
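
The "Propagate doubleness throughout phis" comment refers to walking the node's uses and recursively tagging phi nodes that consume the value. A simplified standalone sketch of that idea, using hypothetical Node/Opcode types rather than V8's graph classes:

#include <unordered_set>
#include <vector>

// Hypothetical, simplified node type for illustration only.
enum class Opcode { kPhi, kOther };
struct Node {
  Opcode opcode = Opcode::kOther;
  std::vector<Node*> uses;  // nodes that consume this node's value
};

// Recursively mark a node and any phi that uses it as "double".
// The visited set makes the recursion safe on cyclic phi structures.
void MarkAsDoubleSketch(Node* node, std::unordered_set<Node*>* is_double) {
  if (!is_double->insert(node).second) return;  // already marked
  for (Node* user : node->uses) {
    if (user->opcode == Opcode::kPhi) {
      MarkAsDoubleSketch(user, is_double);
    }
  }
}
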
@@ -208,14 +208,14 @@ void InstructionSelector::MarkAsDouble(Node* node) {
bool InstructionSelector::IsReference(const Node* node) const {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
return sequence()->IsReference(node->id());
}
void InstructionSelector::MarkAsReference(Node* node) {
- ASSERT_NOT_NULL(node);
- ASSERT(!IsDouble(node));
+ DCHECK_NOT_NULL(node);
+ DCHECK(!IsDouble(node));
sequence()->MarkAsReference(node->id());
// Propagate "referenceness" throughout phis.
@@ -230,7 +230,7 @@ void InstructionSelector::MarkAsReference(Node* node) {
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
Node* node) {
- ASSERT_NOT_NULL(node);
+ DCHECK_NOT_NULL(node);
if (rep == kMachineFloat64) MarkAsDouble(node);
if (rep == kMachineTagged) MarkAsReference(node);
}
@@ -263,8 +263,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
BasicBlock* cont_node,
BasicBlock* deopt_node) {
OperandGenerator g(this);
- ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
- ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());
+ DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+ DCHECK_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());
if (buffer->descriptor->ReturnCount() > 0) {
// Collect the projections that represent multiple outputs from this call.
@@ -318,14 +318,14 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// argument to the call.
InputIter iter(call->inputs().begin());
for (int index = 0; index < input_count; ++iter, ++index) {
- ASSERT(iter != call->inputs().end());
- ASSERT(index == iter.index());
+ DCHECK(iter != call->inputs().end());
+ DCHECK(index == iter.index());
if (index == 0) continue; // The first argument (callee) is already done.
InstructionOperand* op =
g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
- ASSERT(buffer->pushed_nodes[stack_index] == NULL);
+ DCHECK(buffer->pushed_nodes[stack_index] == NULL);
buffer->pushed_nodes[stack_index] = *iter;
buffer->pushed_count++;
} else {
@@ -337,22 +337,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// If the call can deoptimize, we add the continuation and deoptimization
// block labels.
if (buffer->descriptor->CanLazilyDeoptimize()) {
- ASSERT(cont_node != NULL);
- ASSERT(deopt_node != NULL);
+ DCHECK(cont_node != NULL);
+ DCHECK(deopt_node != NULL);
buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
buffer->fixed_and_control_args[buffer->fixed_count + 1] =
g.Label(deopt_node);
} else {
- ASSERT(cont_node == NULL);
- ASSERT(deopt_node == NULL);
+ DCHECK(cont_node == NULL);
+ DCHECK(deopt_node == NULL);
}
- ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
+ DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count));
}
void InstructionSelector::VisitBlock(BasicBlock* block) {
- ASSERT_EQ(NULL, current_block_);
+ DCHECK_EQ(NULL, current_block_);
current_block_ = block;
int current_block_end = static_cast<int>(instructions_.size());
@@ -401,7 +401,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlockData::kGoto:
return VisitGoto(block->SuccessorAt(0));
case BasicBlockData::kBranch: {
- ASSERT_EQ(IrOpcode::kBranch, input->opcode());
+ DCHECK_EQ(IrOpcode::kBranch, input->opcode());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
// SSA deconstruction requires targets of branches not to have phis.
@@ -430,7 +430,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlockData::kNone: {
// TODO(titzer): exit block doesn't have control.
- ASSERT(input == NULL);
+ DCHECK(input == NULL);
break;
}
default:
@@ -441,10 +441,10 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
void InstructionSelector::VisitNode(Node* node) {
- ASSERT_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
+ DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
SourcePosition source_position = source_positions_->GetSourcePosition(node);
if (!source_position.IsUnknown()) {
- ASSERT(!source_position.IsInvalid());
+ DCHECK(!source_position.IsInvalid());
if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
}
@@ -796,7 +796,7 @@ void InstructionSelector::VisitProjection(Node* node) {
if (OpParameter<int32_t>(node) == 0) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- ASSERT_EQ(1, OpParameter<int32_t>(node));
+ DCHECK_EQ(1, OpParameter<int32_t>(node));
MarkAsUsed(value);
}
break;
@@ -952,9 +952,9 @@ void InstructionSelector::VisitThrow(Node* value) {
void InstructionSelector::VisitDeoptimization(Node* deopt) {
- ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+ DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
Node* state = deopt->InputAt(0);
- ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
+ DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
// TODO(jarin) We should also add an instruction input for every input to
// the framestate node (and recurse for the inlined framestates).