Index: src/compiler/instruction-selector.cc
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
new file mode 100644
index 0000000000000000000000000000000000000000..dce3c1c4bc2ef4959cf1501586d8996fae680fb5
--- /dev/null
+++ b/src/compiler/instruction-selector.cc
@@ -0,0 +1,873 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+                                         SourcePositionTable* source_positions)
+    : zone_(sequence->isolate()),
+      sequence_(sequence),
+      source_positions_(source_positions),
+      current_block_(NULL),
+      instructions_(InstructionDeque::allocator_type(zone())),
+      used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
+
+
+void InstructionSelector::SelectInstructions() {
+  // Mark the inputs of all phis in loop headers as used.
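+  // (Blocks are visited backwards below, so a pure node that feeds a loop
+  // header phi only via a back edge would otherwise still look unused when
+  // its defining block is processed.)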
+  BasicBlockVector* blocks = schedule()->rpo_order();
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    if (!block->IsLoopHeader()) continue;
+    ASSERT_NE(0, block->PredecessorCount());
+    ASSERT_NE(1, block->PredecessorCount());
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // Mark all inputs as used.
+      Node::Inputs inputs = phi->inputs();
+      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+        MarkAsUsed(*k);
+      }
+    }
+  }
+
+  // Visit each basic block in post order.
+  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+    VisitBlock(*i);
+  }
+
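+  // {instructions_} was filled back-to-front: VisitBlock() records the buffer
+  // end in {code_start_} and the buffer start in {code_end_}, so walking each
+  // block's range backwards below replays its instructions in forward order.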
+  // Schedule the selected instructions.
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    size_t end = block->code_end_;
+    size_t start = block->code_start_;
+    sequence()->StartBlock(block);
+    while (start-- > end) {
+      sequence()->AddInstruction(instructions_[start], block);
+    }
+    sequence()->EndBlock(block);
+  }
+}
+
+
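+// The following Emit() overloads cover the common zero- to four-input cases;
+// they all funnel into the general overload that allocates the Instruction.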
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b,
+                                       InstructionOperand* c, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d};
+  size_t input_count = ARRAY_SIZE(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+    size_t input_count, InstructionOperand** inputs, size_t temp_count,
+    InstructionOperand** temps) {
+  Instruction* instr =
+      Instruction::New(instruction_zone(), opcode, output_count, outputs,
+                       input_count, inputs, temp_count, temps);
+  return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+  instructions_.push_back(instr);
+  return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+         block->deferred_ == current_block_->deferred_;
+}
+
+
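+// A node can be covered (i.e. selected as part of its user's instruction)
+// only if the user is its sole consumer and both live in the same basic
+// block; otherwise the node must produce a value of its own.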
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+  return node->OwnedBy(user) &&
+         schedule()->block(node) == schedule()->block(user);
+}
+
+
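+// Nodes with observable effects are always considered used; pure
+// (eliminatable) nodes are only emitted if a previously visited user has
+// marked them via MarkAsUsed().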
+bool InstructionSelector::IsUsed(Node* node) const {
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+  NodeId id = node->id();
+  ASSERT(id >= 0);
+  ASSERT(id < static_cast<NodeId>(used_.size()));
+  return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+  ASSERT_NOT_NULL(node);
+  NodeId id = node->id();
+  ASSERT(id >= 0);
+  ASSERT(id < static_cast<NodeId>(used_.size()));
+  used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+  ASSERT_NOT_NULL(node);
+  return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+  ASSERT_NOT_NULL(node);
+  ASSERT(!IsReference(node));
+  sequence()->MarkAsDouble(node->id());
+
+  // Propagate "doubleness" throughout phis.
+  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+    Node* user = *i;
+    if (user->opcode() != IrOpcode::kPhi) continue;
+    if (IsDouble(user)) continue;
+    MarkAsDouble(user);
+  }
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+  ASSERT_NOT_NULL(node);
+  return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+  ASSERT_NOT_NULL(node);
+  ASSERT(!IsDouble(node));
+  sequence()->MarkAsReference(node->id());
+
+  // Propagate "referenceness" throughout phis.
+  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+    Node* user = *i;
+    if (user->opcode() != IrOpcode::kPhi) continue;
+    if (IsReference(user)) continue;
+    MarkAsReference(user);
+  }
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+                                               Node* node) {
+  ASSERT_NOT_NULL(node);
+  if (rep == kMachineFloat64) MarkAsDouble(node);
+  if (rep == kMachineTagged) MarkAsReference(node);
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
+    : output_count(0),
+      descriptor(d),
+      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
+      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
+      fixed_and_control_args(
+          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
+      fixed_count(0),
+      pushed_nodes(zone->NewArray<Node*>(input_count())),
+      pushed_count(0) {
+  if (d->ReturnCount() > 1) {
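+    // With a single return value the call node itself becomes the only
+    // output (see InitializeCallBuffer() below), so zero-initialization is
+    // needed only in the multi-return case.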
+    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
+  }
+  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                                               bool call_code_immediate,
+                                               bool call_address_immediate,
+                                               BasicBlock* cont_node,
+                                               BasicBlock* deopt_node) {
+  OperandGenerator g(this);
+  ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+  ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());
+
+  if (buffer->descriptor->ReturnCount() > 0) {
+    // Collect the projections that represent multiple outputs from this call.
+    if (buffer->descriptor->ReturnCount() == 1) {
+      buffer->output_nodes[0] = call;
+    } else {
+      // Iterate over all uses of {call} and collect the projections into the
+      // {output_nodes} buffer.
+      for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
+        if ((*i)->opcode() == IrOpcode::kProjection) {
+          int index = OpParameter<int32_t>(*i);
+          ASSERT_GE(index, 0);
+          ASSERT_LT(index, buffer->descriptor->ReturnCount());
+          ASSERT_EQ(NULL, buffer->output_nodes[index]);
+          buffer->output_nodes[index] = *i;
+        }
+      }
+    }
+
+    // Filter out the outputs that aren't live because no projection uses them.
+    for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+      if (buffer->output_nodes[i] != NULL) {
+        Node* output = buffer->output_nodes[i];
+        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+        MarkAsRepresentation(location.representation(), output);
+        buffer->outputs[buffer->output_count++] =
+            g.DefineAsLocation(output, location);
+      }
+    }
+  }
+
+  buffer->fixed_count = 1;  // First argument is always the callee.
+  Node* callee = call->InputAt(0);
+  switch (buffer->descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject:
+      buffer->fixed_and_control_args[0] =
+          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee);
+      break;
+    case CallDescriptor::kCallAddress:
+      buffer->fixed_and_control_args[0] =
+          (call_address_immediate &&
+           (callee->opcode() == IrOpcode::kInt32Constant ||
+            callee->opcode() == IrOpcode::kInt64Constant))
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee);
+      break;
+    case CallDescriptor::kCallJSFunction:
+      buffer->fixed_and_control_args[0] =
+          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+      break;
+  }
+
+  int input_count = buffer->input_count();
+
+  // Split the arguments into pushed_nodes and fixed_and_control_args. Pushed
+  // arguments require an explicit push instruction before the call and do
+  // not appear as arguments to the call. Everything else ends up as an
+  // InstructionOperand argument to the call.
+  InputIter iter(call->inputs().begin());
+  for (int index = 0; index < input_count; ++iter, ++index) {
+    ASSERT(iter != call->inputs().end());
+    ASSERT(index == iter.index());
+    if (index == 0) continue;  // The first argument (callee) is already done.
+    InstructionOperand* op =
+        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
+    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
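+      // Arguments destined for stack slots carry negative fixed slot
+      // indices, so this maps slot index -1 to pushed_nodes[0], -2 to
+      // pushed_nodes[1], and so on.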
+      ASSERT(buffer->pushed_nodes[stack_index] == NULL);
+      buffer->pushed_nodes[stack_index] = *iter;
+      buffer->pushed_count++;
+    } else {
+      buffer->fixed_and_control_args[buffer->fixed_count] = op;
+      buffer->fixed_count++;
+    }
+  }
+
+  // If the call can deoptimize, we add the continuation and deoptimization
+  // block labels.
+  if (buffer->descriptor->CanLazilyDeoptimize()) {
+    ASSERT(cont_node != NULL);
+    ASSERT(deopt_node != NULL);
+    buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
+    buffer->fixed_and_control_args[buffer->fixed_count + 1] =
+        g.Label(deopt_node);
+  } else {
+    ASSERT(cont_node == NULL);
+    ASSERT(deopt_node == NULL);
+  }
+
+  ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
+}
+
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+  ASSERT_EQ(NULL, current_block_);
+  current_block_ = block;
+  size_t current_block_end = instructions_.size();
+
+  // Generate code for the block control "top down", but schedule the code
+  // "bottom up".
+  VisitControl(block);
+  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+  // Visit code in reverse control flow order, because architecture-specific
+  // matching may cover more than one node at a time.
+  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+       ++i) {
+    Node* node = *i;
+    if (!IsUsed(node)) continue;
+    // Generate code for this node "top down", but schedule the code "bottom
+    // up".
+    size_t current_node_end = instructions_.size();
+    VisitNode(node);
+    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+  }
+
+  // We're done with the block.
+  // TODO(bmeurer): We should not mutate the schedule.
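+  // Note the swapped naming: {code_start_} records the buffer end and
+  // {code_end_} the buffer start, matching the backwards replay in
+  // SelectInstructions().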
+  block->code_end_ = current_block_end;
+  block->code_start_ = instructions_.size();
+
+  current_block_ = NULL;
+}
+
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+  // Branch targets should not have phis.
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    const Node* node = *i;
+    CHECK_NE(IrOpcode::kPhi, node->opcode());
+  }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+  Node* input = block->control_input_;
+  switch (block->control_) {
+    case BasicBlockData::kGoto:
+      return VisitGoto(block->SuccessorAt(0));
+    case BasicBlockData::kBranch: {
+      ASSERT_EQ(IrOpcode::kBranch, input->opcode());
+      BasicBlock* tbranch = block->SuccessorAt(0);
+      BasicBlock* fbranch = block->SuccessorAt(1);
+      // SSA deconstruction requires targets of branches not to have phis.
+      // Edge split form guarantees this property, but is more strict.
+      CheckNoPhis(tbranch);
+      CheckNoPhis(fbranch);
+      if (tbranch == fbranch) return VisitGoto(tbranch);
+      return VisitBranch(input, tbranch, fbranch);
+    }
+    case BasicBlockData::kReturn: {
+      // If the control input is itself a Return node, visit its value input
+      // instead.
+      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+                        ? input->InputAt(0)
+                        : input;
+      return VisitReturn(value);
+    }
+    case BasicBlockData::kThrow:
+      return VisitThrow(input);
+    case BasicBlockData::kDeoptimize:
+      return VisitDeoptimization(input);
+    case BasicBlockData::kCall: {
+      BasicBlock* deoptimization = block->SuccessorAt(0);
+      BasicBlock* continuation = block->SuccessorAt(1);
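+      // Successor 0 is the deoptimization target, successor 1 the
+      // continuation; VisitCall() takes them in the opposite order.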
+      VisitCall(input, continuation, deoptimization);
+      break;
+    }
+    case BasicBlockData::kNone: {
+      // TODO(titzer): exit block doesn't have control.
+      ASSERT(input == NULL);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+  ASSERT_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
+  SourcePosition source_position = source_positions_->GetSourcePosition(node);
+  if (!source_position.IsUnknown()) {
+    ASSERT(!source_position.IsInvalid());
+    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+    }
+  }
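+  // (Call sites always record their source position, presumably so that
+  // lazy deoptimization can map them back; other nodes only do so under
+  // --turbo-source-positions.)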
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+    case IrOpcode::kLoop:
+    case IrOpcode::kEnd:
+    case IrOpcode::kBranch:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kMerge:
+    case IrOpcode::kProjection:
+    case IrOpcode::kLazyDeoptimization:
+    case IrOpcode::kContinuation:
+      // No code needed for these graph artifacts.
+      return;
+    case IrOpcode::kPhi:
+      return VisitPhi(node);
+    case IrOpcode::kParameter: {
+      int index = OpParameter<int>(node);
+      MachineRepresentation rep = linkage()
+                                      ->GetIncomingDescriptor()
+                                      ->GetInputLocation(index)
+                                      .representation();
+      MarkAsRepresentation(rep, node);
+      return VisitParameter(node);
+    }
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kExternalConstant:
+      return VisitConstant(node);
+    case IrOpcode::kFloat64Constant:
+      return MarkAsDouble(node), VisitConstant(node);
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kNumberConstant:
+      // TODO(turbofan): only mark non-smis as references.
+      return MarkAsReference(node), VisitConstant(node);
+    case IrOpcode::kCall:
+      return VisitCall(node, NULL, NULL);
+    case IrOpcode::kFrameState:
+      // TODO(titzer): state nodes should be combined into their users.
+      return;
+    case IrOpcode::kLoad: {
+      MachineRepresentation load_rep = OpParameter<MachineRepresentation>(node);
+      MarkAsRepresentation(load_rep, node);
+      return VisitLoad(node);
+    }
+    case IrOpcode::kStore:
+      return VisitStore(node);
+    case IrOpcode::kWord32And:
+      return VisitWord32And(node);
+    case IrOpcode::kWord32Or:
+      return VisitWord32Or(node);
+    case IrOpcode::kWord32Xor:
+      return VisitWord32Xor(node);
+    case IrOpcode::kWord32Shl:
+      return VisitWord32Shl(node);
+    case IrOpcode::kWord32Shr:
+      return VisitWord32Shr(node);
+    case IrOpcode::kWord32Sar:
+      return VisitWord32Sar(node);
+    case IrOpcode::kWord32Equal:
+      return VisitWord32Equal(node);
+    case IrOpcode::kWord64And:
+      return VisitWord64And(node);
+    case IrOpcode::kWord64Or:
+      return VisitWord64Or(node);
+    case IrOpcode::kWord64Xor:
+      return VisitWord64Xor(node);
+    case IrOpcode::kWord64Shl:
+      return VisitWord64Shl(node);
+    case IrOpcode::kWord64Shr:
+      return VisitWord64Shr(node);
+    case IrOpcode::kWord64Sar:
+      return VisitWord64Sar(node);
+    case IrOpcode::kWord64Equal:
+      return VisitWord64Equal(node);
+    case IrOpcode::kInt32Add:
+      return VisitInt32Add(node);
+    case IrOpcode::kInt32Sub:
+      return VisitInt32Sub(node);
+    case IrOpcode::kInt32Mul:
+      return VisitInt32Mul(node);
+    case IrOpcode::kInt32Div:
+      return VisitInt32Div(node);
+    case IrOpcode::kInt32UDiv:
+      return VisitInt32UDiv(node);
+    case IrOpcode::kInt32Mod:
+      return VisitInt32Mod(node);
+    case IrOpcode::kInt32UMod:
+      return VisitInt32UMod(node);
+    case IrOpcode::kInt32LessThan:
+      return VisitInt32LessThan(node);
+    case IrOpcode::kInt32LessThanOrEqual:
+      return VisitInt32LessThanOrEqual(node);
+    case IrOpcode::kUint32LessThan:
+      return VisitUint32LessThan(node);
+    case IrOpcode::kUint32LessThanOrEqual:
+      return VisitUint32LessThanOrEqual(node);
+    case IrOpcode::kInt64Add:
+      return VisitInt64Add(node);
+    case IrOpcode::kInt64Sub:
+      return VisitInt64Sub(node);
+    case IrOpcode::kInt64Mul:
+      return VisitInt64Mul(node);
+    case IrOpcode::kInt64Div:
+      return VisitInt64Div(node);
+    case IrOpcode::kInt64UDiv:
+      return VisitInt64UDiv(node);
+    case IrOpcode::kInt64Mod:
+      return VisitInt64Mod(node);
+    case IrOpcode::kInt64UMod:
+      return VisitInt64UMod(node);
+    case IrOpcode::kInt64LessThan:
+      return VisitInt64LessThan(node);
+    case IrOpcode::kInt64LessThanOrEqual:
+      return VisitInt64LessThanOrEqual(node);
+    case IrOpcode::kConvertInt32ToInt64:
+      return VisitConvertInt32ToInt64(node);
+    case IrOpcode::kConvertInt64ToInt32:
+      return VisitConvertInt64ToInt32(node);
+    case IrOpcode::kConvertInt32ToFloat64:
+      return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
+    case IrOpcode::kConvertFloat64ToInt32:
+      return VisitConvertFloat64ToInt32(node);
+    case IrOpcode::kFloat64Add:
+      return MarkAsDouble(node), VisitFloat64Add(node);
+    case IrOpcode::kFloat64Sub:
+      return MarkAsDouble(node), VisitFloat64Sub(node);
+    case IrOpcode::kFloat64Mul:
+      return MarkAsDouble(node), VisitFloat64Mul(node);
+    case IrOpcode::kFloat64Div:
+      return MarkAsDouble(node), VisitFloat64Div(node);
+    case IrOpcode::kFloat64Mod:
+      return MarkAsDouble(node), VisitFloat64Mod(node);
+    case IrOpcode::kFloat64Equal:
+      return VisitFloat64Equal(node);
+    case IrOpcode::kFloat64LessThan:
+      return VisitFloat64LessThan(node);
+    case IrOpcode::kFloat64LessThanOrEqual:
+      return VisitFloat64LessThanOrEqual(node);
+    default:
+      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+               node->opcode(), node->op()->mnemonic(), node->id());
+  }
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32Test(m.left().node(), &cont);
+  }
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64Test(m.left().node(), &cont);
+  }
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+// 32-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+  // TODO(bmeurer): Emit a PhiInstruction here.
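+  // For now a phi generates no code of its own; marking its inputs as used
+  // just keeps the corresponding definitions alive.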
+  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+    MarkAsUsed(*i);
+  }
+}
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+                                              OpParameter<int>(node))));
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+  // We must emit a NOP here because every live range needs a defining
+  // instruction in the register allocator.
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+  if (IsNextInAssemblyOrder(target)) {
+    // Fall through to the next block.
+    Emit(kArchNop, NULL)->MarkAsControl();
+  } else {
+    // Jump to the target block.
+    OperandGenerator g(this);
+    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // If we can fall through to the true block, invert the branch.
+  if (IsNextInAssemblyOrder(tbranch)) {
+    cont.Negate();
+    cont.SwapBlocks();
+  }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
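+  // E.g. Branch(Word32Equal(x, 0)) turns into a branch on x with the
+  // condition negated; the loop also unwraps chains like
+  // Word32Equal(Word32Equal(x, 0), 0).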
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(value, &cont);
+      default:
+        break;
+    }
+  }
+
+  // Branch could not be combined with a compare; emit a compare against 0
+  // instead.
+  VisitWord32Test(value, &cont);
+}
+
+
+void InstructionSelector::VisitReturn(Node* value) {
+  OperandGenerator g(this);
+  if (value != NULL) {
+    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
+  } else {
+    Emit(kArchRet, NULL);
+  }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+  UNIMPLEMENTED();  // TODO(titzer)
+}
+
+
+void InstructionSelector::VisitDeoptimization(Node* deopt) {
+  ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+  Node* state = deopt->InputAt(0);
+  ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
+  FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
+  // TODO(jarin) We should also add an instruction input for every input to
+  // the framestate node (and recurse for the inlined framestates).
+  int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
+  Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8