| Index: src/compiler/simplified-lowering.cc
|
| diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
|
| index de5fd3efded468b0eaf9cac62057ee1e727cdd67..3ef9d30fcd123e98755c9cbfd6cfebb1e2433da7 100644
|
| --- a/src/compiler/simplified-lowering.cc
|
| +++ b/src/compiler/simplified-lowering.cc
|
| @@ -4,14 +4,706 @@
|
|
|
| #include "src/compiler/simplified-lowering.h"
|
|
|
| +#include <deque>
|
| +#include <queue>
|
| +
|
| +#include "src/compiler/common-operator.h"
|
| #include "src/compiler/graph-inl.h"
|
| #include "src/compiler/node-properties-inl.h"
|
| +#include "src/compiler/representation-change.h"
|
| +#include "src/compiler/simplified-lowering.h"
|
| +#include "src/compiler/simplified-operator.h"
|
| #include "src/objects.h"
|
|
|
| namespace v8 {
|
| namespace internal {
|
| namespace compiler {
|
|
|
| +// Macro for outputting trace information from representation inference.
|
| +#define TRACE(x) \
|
| + if (FLAG_trace_representation) PrintF x
|
| +
|
| +// Representation selection and lowering of {Simplified} operators to machine
|
| +// operators are intertwined. We use a fixpoint calculation to compute both the
|
| +// output representation and the best possible lowering for {Simplified} nodes.
|
| +// Representation change insertion ensures that all values are in the correct
|
| +// machine representation after this phase, as dictated by the machine
|
| +// operators themselves.
|
| +enum Phase {
|
| + // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
|
| + // backwards from uses to definitions, around cycles in phis, according
|
| + // to local rules for each operator.
|
| + // During this phase, the usage information for a node determines the best
|
| + // possible lowering for each operator so far, and that in turn determines
|
| + // the output representation.
|
| + // Therefore, to be correct, this phase must iterate to a fixpoint before
|
| + // the next phase can begin.
|
| + PROPAGATE,
|
| +
|
| + // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
|
| + // operators for some nodes, expanding some nodes to multiple nodes, or
|
| + // removing some (redundant) nodes.
|
| + // During this phase, use the {RepresentationChanger} to insert
|
| + // representation changes between uses that demand a particular
|
| + // representation and nodes that produce a different representation.
|
| + LOWER
|
| +};
|
| +
|
| +
|
| +class RepresentationSelector {
|
| + public:
|
| + // Information for each node tracked during the fixpoint.
|
| + struct NodeInfo {
|
| + RepTypeUnion use : 14; // Union of all usages for the node.
|
| + bool queued : 1; // Bookkeeping for the traversal.
|
| + bool visited : 1; // Bookkeeping for the traversal.
|
| + RepTypeUnion output : 14; // Output type of the node.
|
| + };
|
| +
|
| + RepresentationSelector(JSGraph* jsgraph, Zone* zone,
|
| + RepresentationChanger* changer)
|
| + : jsgraph_(jsgraph),
|
| + count_(jsgraph->graph()->NodeCount()),
|
| + info_(zone->NewArray<NodeInfo>(count_)),
|
| + nodes_(NodeVector::allocator_type(zone)),
|
| + replacements_(NodeVector::allocator_type(zone)),
|
| + contains_js_nodes_(false),
|
| + phase_(PROPAGATE),
|
| + changer_(changer),
|
| + queue_(std::deque<Node*, NodePtrZoneAllocator>(
|
| + NodePtrZoneAllocator(zone))) {
|
| + memset(info_, 0, sizeof(NodeInfo) * count_);
|
| + }
|
| +
|
| + void Run(SimplifiedLowering* lowering) {
|
| + // Run propagation phase to a fixpoint.
|
| + TRACE(("--{Propagation phase}--\n"));
|
| + phase_ = PROPAGATE;
|
| + Enqueue(jsgraph_->graph()->end());
|
| + // Process nodes from the queue until it is empty.
|
| + while (!queue_.empty()) {
|
| + Node* node = queue_.front();
|
| + NodeInfo* info = GetInfo(node);
|
| + queue_.pop();
|
| + info->queued = false;
|
| + TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
|
| + VisitNode(node, info->use, NULL);
|
| + TRACE((" ==> output "));
|
| + PrintInfo(info->output);
|
| + TRACE(("\n"));
|
| + }
|
| +
|
| + // Run lowering and change insertion phase.
|
| + TRACE(("--{Simplified lowering phase}--\n"));
|
| + phase_ = LOWER;
|
| + // Process nodes from the collected {nodes_} vector.
|
| + for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
|
| + Node* node = *i;
|
| + TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
|
| + // Reuse {VisitNode()} so the representation rules are in one place.
|
| + VisitNode(node, GetUseInfo(node), lowering);
|
| + }
|
| +
|
| + // Perform the final replacements.
|
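| + // {replacements_} holds (node, replacement) pairs appended by DeferReplacement(), so the iterator advances twice per pair.
|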
| + for (NodeVector::iterator i = replacements_.begin();
|
| + i != replacements_.end(); ++i) {
|
| + Node* node = *i;
|
| + Node* replacement = *(++i);
|
| + node->ReplaceUses(replacement);
|
| + }
|
| + }
|
| +
|
| + // Enqueue {node} if the {use} contains new information for that node.
|
| + // Add {node} to {nodes_} if this is the first time it's been visited.
|
| + void Enqueue(Node* node, RepTypeUnion use = 0) {
|
| + if (phase_ != PROPAGATE) return;
|
| + NodeInfo* info = GetInfo(node);
|
| + if (!info->visited) {
|
| + // First visit of this node.
|
| + info->visited = true;
|
| + info->queued = true;
|
| + nodes_.push_back(node);
|
| + queue_.push(node);
|
| + TRACE((" initial: "));
|
| + info->use |= use;
|
| + PrintUseInfo(node);
|
| + return;
|
| + }
|
| + TRACE((" queue?: "));
|
| + PrintUseInfo(node);
|
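| + // Re-enqueue only if {use} contains bits that are not yet recorded in {info->use}.
|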
| + if ((info->use & use) != use) {
|
| + // New usage information for the node is available.
|
| + if (!info->queued) {
|
| + queue_.push(node);
|
| + info->queued = true;
|
| + TRACE((" added: "));
|
| + } else {
|
| + TRACE((" inqueue: "));
|
| + }
|
| + info->use |= use;
|
| + PrintUseInfo(node);
|
| + }
|
| + }
|
| +
|
| + bool lower() { return phase_ == LOWER; }
|
| +
|
| + void Enqueue(Node* node, RepType use) {
|
| + Enqueue(node, static_cast<RepTypeUnion>(use));
|
| + }
|
| +
|
| + void SetOutput(Node* node, RepTypeUnion output) {
|
| + // Every node should have at most one output representation. Note that
|
| + // phis can have 0, if they have not been used in a representation-inducing
|
| + // instruction.
|
| + DCHECK((output & rMask) == 0 || IsPowerOf2(output & rMask));
|
| + GetInfo(node)->output = output;
|
| + }
|
| +
|
| + bool BothInputsAre(Node* node, Type* type) {
|
| + DCHECK_EQ(2, node->InputCount());
|
| + return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
|
| + NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
|
| + }
|
| +
|
| + void ProcessInput(Node* node, int index, RepTypeUnion use) {
|
| + Node* input = node->InputAt(index);
|
| + if (phase_ == PROPAGATE) {
|
| + // In the propagate phase, propagate the usage information backward.
|
| + Enqueue(input, use);
|
| + } else {
|
| + // In the change phase, insert a change before the use if necessary.
|
| + if ((use & rMask) == 0) return; // No input requirement on the use.
|
| + RepTypeUnion output = GetInfo(input)->output;
|
| + if ((output & rMask & use) == 0) {
|
| + // Output representation doesn't match usage.
|
| + TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
|
| + node->op()->mnemonic(), index, input->id(),
|
| + input->op()->mnemonic()));
|
| + TRACE((" from "));
|
| + PrintInfo(output);
|
| + TRACE((" to "));
|
| + PrintInfo(use);
|
| + TRACE(("\n"));
|
| + Node* n = changer_->GetRepresentationFor(input, output, use);
|
| + node->ReplaceInput(index, n);
|
| + }
|
| + }
|
| + }
|
| +
|
| + static const RepTypeUnion kFloat64 = rFloat64 | tNumber;
|
| + static const RepTypeUnion kInt32 = rWord32 | tInt32;
|
| + static const RepTypeUnion kUint32 = rWord32 | tUint32;
|
| + static const RepTypeUnion kInt64 = rWord64 | tInt64;
|
| + static const RepTypeUnion kUint64 = rWord64 | tUint64;
|
| + static const RepTypeUnion kAnyTagged = rTagged | tAny;
|
| +
|
| + // The default, most general visitation case. For {node}, process all value,
|
| + // context, effect, and control inputs, assuming that value inputs should have
|
| + // {rTagged} representation and can observe all output values {tAny}.
|
| + void VisitInputs(Node* node) {
|
| + InputIter i = node->inputs().begin();
|
| + for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
|
| + ++i, j--) {
|
| + ProcessInput(node, i.index(), kAnyTagged); // Value inputs
|
| + }
|
| + for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
|
| + ++i, j--) {
|
| + ProcessInput(node, i.index(), kAnyTagged); // Context inputs
|
| + }
|
| + for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
|
| + ++i, j--) {
|
| + Enqueue(*i); // Effect inputs: just visit
|
| + }
|
| + for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
|
| + ++i, j--) {
|
| + Enqueue(*i); // Control inputs: just visit
|
| + }
|
| + SetOutput(node, kAnyTagged);
|
| + }
|
| +
|
| + // Helper for binops of the I x I -> O variety.
|
| + void VisitBinop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
|
| + DCHECK_EQ(2, node->InputCount());
|
| + ProcessInput(node, 0, input_use);
|
| + ProcessInput(node, 1, input_use);
|
| + SetOutput(node, output);
|
| + }
|
| +
|
| + // Helper for unops of the I -> O variety.
|
| + void VisitUnop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
|
| + DCHECK_EQ(1, node->InputCount());
|
| + ProcessInput(node, 0, input_use);
|
| + SetOutput(node, output);
|
| + }
|
| +
|
| + // Helper for leaf nodes.
|
| + void VisitLeaf(Node* node, RepTypeUnion output) {
|
| + DCHECK_EQ(0, node->InputCount());
|
| + SetOutput(node, output);
|
| + }
|
| +
|
| + // Helpers for specific types of binops.
|
| + void VisitFloat64Binop(Node* node) { VisitBinop(node, kFloat64, kFloat64); }
|
| + void VisitInt32Binop(Node* node) { VisitBinop(node, kInt32, kInt32); }
|
| + void VisitUint32Binop(Node* node) { VisitBinop(node, kUint32, kUint32); }
|
| + void VisitInt64Binop(Node* node) { VisitBinop(node, kInt64, kInt64); }
|
| + void VisitUint64Binop(Node* node) { VisitBinop(node, kUint64, kUint64); }
|
| + void VisitFloat64Cmp(Node* node) { VisitBinop(node, kFloat64, rBit); }
|
| + void VisitInt32Cmp(Node* node) { VisitBinop(node, kInt32, rBit); }
|
| + void VisitUint32Cmp(Node* node) { VisitBinop(node, kUint32, rBit); }
|
| + void VisitInt64Cmp(Node* node) { VisitBinop(node, kInt64, rBit); }
|
| + void VisitUint64Cmp(Node* node) { VisitBinop(node, kUint64, rBit); }
|
| +
|
| + // Helper for handling phis.
|
| + void VisitPhi(Node* node, RepTypeUnion use) {
|
| + // First, propagate the usage information to inputs of the phi.
|
| + int values = OperatorProperties::GetValueInputCount(node->op());
|
| + Node::Inputs inputs = node->inputs();
|
| + for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
|
| + ++iter, --values) {
|
| + // Propagate {use} of the phi to value inputs, and 0 to control.
|
| + // TODO(titzer): it'd be nice to have distinguished edge kinds here.
|
| + ProcessInput(node, iter.index(), values > 0 ? use : 0);
|
| + }
|
| + // Phis adapt to whatever output representation their uses demand,
|
| + // pushing representation changes to their inputs.
|
| + RepTypeUnion use_rep = GetUseInfo(node) & rMask;
|
| + RepTypeUnion use_type = GetUseInfo(node) & tMask;
|
| + RepTypeUnion rep = 0;
|
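| + // Pick the most general representation demanded by any use: tagged beats float64, which beats word64, word32, and bit.
|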
| + if (use_rep & rTagged) {
|
| + rep = rTagged; // Tagged overrides everything.
|
| + } else if (use_rep & rFloat64) {
|
| + rep = rFloat64;
|
| + } else if (use_rep & rWord64) {
|
| + rep = rWord64;
|
| + } else if (use_rep & rWord32) {
|
| + rep = rWord32;
|
| + } else if (use_rep & rBit) {
|
| + rep = rBit;
|
| + } else {
|
| + // There was no representation associated with any of the uses.
|
| + // TODO(titzer): Select the best rep using phi's type, not the usage type?
|
| + if (use_type & tAny) {
|
| + rep = rTagged;
|
| + } else if (use_type & tNumber) {
|
| + rep = rFloat64;
|
| + } else if (use_type & tInt64 || use_type & tUint64) {
|
| + rep = rWord64;
|
| + } else if (use_type & tInt32 || use_type & tUint32) {
|
| + rep = rWord32;
|
| + } else if (use_type & tBool) {
|
| + rep = rBit;
|
| + } else {
|
| + UNREACHABLE(); // should have at least a usage type!
|
| + }
|
| + }
|
| + // Set the representation chosen above; derive the output type from the phi's upper bound.
|
| + Type* upper = NodeProperties::GetBounds(node).upper;
|
| + SetOutput(node, rep | changer_->TypeFromUpperBound(upper));
|
| + }
|
| +
|
| + Operator* Int32Op(Node* node) {
|
| + return changer_->Int32OperatorFor(node->opcode());
|
| + }
|
| +
|
| + Operator* Uint32Op(Node* node) {
|
| + return changer_->Uint32OperatorFor(node->opcode());
|
| + }
|
| +
|
| + Operator* Float64Op(Node* node) {
|
| + return changer_->Float64OperatorFor(node->opcode());
|
| + }
|
| +
|
| + // Dispatching routine for visiting the node {node} with the usage {use}.
|
| + // Depending on the operator, propagate new usage info to the inputs.
|
| + void VisitNode(Node* node, RepTypeUnion use, SimplifiedLowering* lowering) {
|
| + switch (node->opcode()) {
|
| + //------------------------------------------------------------------
|
| + // Common operators.
|
| + //------------------------------------------------------------------
|
| + case IrOpcode::kStart:
|
| + case IrOpcode::kDead:
|
| + return VisitLeaf(node, 0);
|
| + case IrOpcode::kParameter: {
|
| + // TODO(titzer): use representation from linkage.
|
| + Type* upper = NodeProperties::GetBounds(node).upper;
|
| + ProcessInput(node, 0, 0);
|
| + SetOutput(node, rTagged | changer_->TypeFromUpperBound(upper));
|
| + return;
|
| + }
|
| + case IrOpcode::kInt32Constant:
|
| + return VisitLeaf(node, rWord32);
|
| + case IrOpcode::kInt64Constant:
|
| + return VisitLeaf(node, rWord64);
|
| + case IrOpcode::kFloat64Constant:
|
| + return VisitLeaf(node, rFloat64);
|
| + case IrOpcode::kExternalConstant:
|
| + return VisitLeaf(node, rPtr);
|
| + case IrOpcode::kNumberConstant:
|
| + return VisitLeaf(node, rTagged);
|
| + case IrOpcode::kHeapConstant:
|
| + return VisitLeaf(node, rTagged);
|
| +
|
| + case IrOpcode::kEnd:
|
| + case IrOpcode::kIfTrue:
|
| + case IrOpcode::kIfFalse:
|
| + case IrOpcode::kReturn:
|
| + case IrOpcode::kMerge:
|
| + case IrOpcode::kThrow:
|
| + return VisitInputs(node); // default visit for all node inputs.
|
| +
|
| + case IrOpcode::kBranch:
|
| + ProcessInput(node, 0, rBit);
|
| + Enqueue(NodeProperties::GetControlInput(node, 0));
|
| + break;
|
| + case IrOpcode::kPhi:
|
| + return VisitPhi(node, use);
|
| +
|
| +//------------------------------------------------------------------
|
| +// JavaScript operators.
|
| +//------------------------------------------------------------------
|
| +// For now, we assume that all JS operators were too complex to lower
|
| +// to Simplified and that they will always require tagged value inputs
|
| +// and produce tagged value outputs.
|
| +// TODO(turbofan): it might be possible to lower some JSOperators here,
|
| +// but that responsibility really lies in the typed lowering phase.
|
| +#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
|
| + JS_OP_LIST(DEFINE_JS_CASE)
|
| +#undef DEFINE_JS_CASE
|
| + contains_js_nodes_ = true;
|
| + VisitInputs(node);
|
| + return SetOutput(node, rTagged);
|
| +
|
| + //------------------------------------------------------------------
|
| + // Simplified operators.
|
| + //------------------------------------------------------------------
|
| + case IrOpcode::kBooleanNot: {
|
| + if (lower()) {
|
| + RepTypeUnion input = GetInfo(node->InputAt(0))->output;
|
| + if (input & rBit) {
|
| + // BooleanNot(x: rBit) => WordEqual(x, #0)
|
| + node->set_op(lowering->machine()->WordEqual());
|
| + node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
|
| + } else {
|
| + // BooleanNot(x: rTagged) => WordEqual(x, #false)
|
| + node->set_op(lowering->machine()->WordEqual());
|
| + node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
|
| + }
|
| + } else {
|
| + // No input representation requirement; adapt during lowering.
|
| + ProcessInput(node, 0, tBool);
|
| + SetOutput(node, rBit);
|
| + }
|
| + break;
|
| + }
|
| + case IrOpcode::kNumberEqual:
|
| + case IrOpcode::kNumberLessThan:
|
| + case IrOpcode::kNumberLessThanOrEqual: {
|
| + // Number comparisons reduce to integer comparisons for integer inputs.
|
| + if (BothInputsAre(node, Type::Signed32())) {
|
| + // => signed Int32Cmp
|
| + VisitInt32Cmp(node);
|
| + if (lower()) node->set_op(Int32Op(node));
|
| + } else if (BothInputsAre(node, Type::Unsigned32())) {
|
| + // => unsigned Int32Cmp
|
| + VisitUint32Cmp(node);
|
| + if (lower()) node->set_op(Uint32Op(node));
|
| + } else {
|
| + // => Float64Cmp
|
| + VisitFloat64Cmp(node);
|
| + if (lower()) node->set_op(Float64Op(node));
|
| + }
|
| + break;
|
| + }
|
| + case IrOpcode::kNumberAdd:
|
| + case IrOpcode::kNumberSubtract: {
|
| + // Add and subtract reduce to Int32Add/Sub if the inputs
|
| + // are already integers and all uses are truncating.
|
| + if (BothInputsAre(node, Type::Signed32()) &&
|
| + (use & (tUint32 | tNumber | tAny)) == 0) {
|
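| + // No use demands the result as uint32, a full number, or an arbitrary value, so 32-bit arithmetic suffices.
|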
| + // => signed Int32Add/Sub
|
| + VisitInt32Binop(node);
|
| + if (lower()) node->set_op(Int32Op(node));
|
| + } else if (BothInputsAre(node, Type::Unsigned32()) &&
|
| + (use & (tInt32 | tNumber | tAny)) == 0) {
|
| + // => unsigned Int32Add/Sub
|
| + VisitUint32Binop(node);
|
| + if (lower()) node->set_op(Uint32Op(node));
|
| + } else {
|
| + // => Float64Add/Sub
|
| + VisitFloat64Binop(node);
|
| + if (lower()) node->set_op(Float64Op(node));
|
| + }
|
| + break;
|
| + }
|
| + case IrOpcode::kNumberMultiply:
|
| + case IrOpcode::kNumberDivide:
|
| + case IrOpcode::kNumberModulus: {
|
| + // Float64Mul/Div/Mod
|
| + VisitFloat64Binop(node);
|
| + if (lower()) node->set_op(Float64Op(node));
|
| + break;
|
| + }
|
| + case IrOpcode::kNumberToInt32: {
|
| + RepTypeUnion use_rep = use & rMask;
|
| + if (lower()) {
|
| + RepTypeUnion in = GetInfo(node->InputAt(0))->output;
|
| + if ((in & tMask) == tInt32 || (in & rMask) == rWord32) {
|
| + // If the input has type int32, or is already a word32, just change
|
| + // representation if necessary.
|
| + VisitUnop(node, tInt32 | use_rep, tInt32 | use_rep);
|
| + DeferReplacement(node, node->InputAt(0));
|
| + } else {
|
| + // Require the input in float64 format and perform truncation.
|
| + // TODO(turbofan): could also avoid the truncation with a tag check.
|
| + VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
|
| + // TODO(titzer): should be a truncation.
|
| + node->set_op(lowering->machine()->ChangeFloat64ToInt32());
|
| + }
|
| + } else {
|
| + // Propagate a type to the input, but pass through representation.
|
| + VisitUnop(node, tInt32, tInt32 | use_rep);
|
| + }
|
| + break;
|
| + }
|
| + case IrOpcode::kNumberToUint32: {
|
| + RepTypeUnion use_rep = use & rMask;
|
| + if (lower()) {
|
| + RepTypeUnion in = GetInfo(node->InputAt(0))->output;
|
| + if ((in & tMask) == tUint32 || (in & rMask) == rWord32) {
|
| + // If the input has type uint32, or is already a word32, just change representation if necessary.
|
| + VisitUnop(node, tUint32 | use_rep, tUint32 | use_rep);
|
| + DeferReplacement(node, node->InputAt(0));
|
| + } else {
|
| + // Require the input in float64 format to perform truncation.
|
| + // TODO(turbofan): could also avoid the truncation with a tag check.
|
| + VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
|
| + // TODO(titzer): should be a truncation.
|
| + node->set_op(lowering->machine()->ChangeFloat64ToUint32());
|
| + }
|
| + } else {
|
| + // Propagate a type to the input, but pass through representation.
|
| + VisitUnop(node, tUint32, tUint32 | use_rep);
|
| + }
|
| + break;
|
| + }
|
| + case IrOpcode::kReferenceEqual: {
|
| + VisitBinop(node, kAnyTagged, rBit);
|
| + if (lower()) node->set_op(lowering->machine()->WordEqual());
|
| + break;
|
| + }
|
| + case IrOpcode::kStringEqual: {
|
| + VisitBinop(node, kAnyTagged, rBit);
|
| + // TODO(titzer): lower StringEqual to stub/runtime call.
|
| + break;
|
| + }
|
| + case IrOpcode::kStringLessThan: {
|
| + VisitBinop(node, kAnyTagged, rBit);
|
| + // TODO(titzer): lower StringLessThan to stub/runtime call.
|
| + break;
|
| + }
|
| + case IrOpcode::kStringLessThanOrEqual: {
|
| + VisitBinop(node, kAnyTagged, rBit);
|
| + // TODO(titzer): lower StringLessThanOrEqual to stub/runtime call.
|
| + break;
|
| + }
|
| + case IrOpcode::kStringAdd: {
|
| + VisitBinop(node, kAnyTagged, kAnyTagged);
|
| + // TODO(titzer): lower StringAdd to stub/runtime call.
|
| + break;
|
| + }
|
| + case IrOpcode::kLoadField: {
|
| + FieldAccess access = FieldAccessOf(node->op());
|
| + ProcessInput(node, 0, changer_->TypeForBasePointer(access));
|
| + SetOutput(node, changer_->TypeForField(access));
|
| + if (lower()) lowering->DoLoadField(node);
|
| + break;
|
| + }
|
| + case IrOpcode::kStoreField: {
|
| + FieldAccess access = FieldAccessOf(node->op());
|
| + ProcessInput(node, 0, changer_->TypeForBasePointer(access));
|
| + ProcessInput(node, 1, changer_->TypeForField(access));
|
| + SetOutput(node, 0);
|
| + if (lower()) lowering->DoStoreField(node);
|
| + break;
|
| + }
|
| + case IrOpcode::kLoadElement: {
|
| + ElementAccess access = ElementAccessOf(node->op());
|
| + ProcessInput(node, 0, changer_->TypeForBasePointer(access));
|
| + ProcessInput(node, 1, kInt32); // element index
|
| + SetOutput(node, changer_->TypeForElement(access));
|
| + if (lower()) lowering->DoLoadElement(node);
|
| + break;
|
| + }
|
| + case IrOpcode::kStoreElement: {
|
| + ElementAccess access = ElementAccessOf(node->op());
|
| + ProcessInput(node, 0, changer_->TypeForBasePointer(access));
|
| + ProcessInput(node, 1, kInt32); // element index
|
| + ProcessInput(node, 2, changer_->TypeForElement(access));
|
| + SetOutput(node, 0);
|
| + if (lower()) lowering->DoStoreElement(node);
|
| + break;
|
| + }
|
| +
|
| + //------------------------------------------------------------------
|
| + // Machine-level operators.
|
| + //------------------------------------------------------------------
|
| + case IrOpcode::kLoad: {
|
| + // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
|
| + RepType tBase = rTagged;
|
| + MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
|
| + ProcessInput(node, 0, tBase); // pointer or object
|
| + ProcessInput(node, 1, kInt32); // index
|
| + SetOutput(node, changer_->TypeForMachineRepresentation(rep));
|
| + break;
|
| + }
|
| + case IrOpcode::kStore: {
|
| + // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
|
| + RepType tBase = rTagged;
|
| + StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
|
| + ProcessInput(node, 0, tBase); // pointer or object
|
| + ProcessInput(node, 1, kInt32); // index
|
| + ProcessInput(node, 2, changer_->TypeForMachineRepresentation(rep.rep));
|
| + SetOutput(node, 0);
|
| + break;
|
| + }
|
| + case IrOpcode::kWord32Shr:
|
| + // We output unsigned int32 for shift right because JavaScript's >>> operator produces an unsigned result.
|
| + return VisitBinop(node, rWord32, rWord32 | tUint32);
|
| + case IrOpcode::kWord32And:
|
| + case IrOpcode::kWord32Or:
|
| + case IrOpcode::kWord32Xor:
|
| + case IrOpcode::kWord32Shl:
|
| + case IrOpcode::kWord32Sar:
|
| + // We use signed int32 as the output type for these word32 operations,
|
| + // though the machine bits are the same for either signed or unsigned,
|
| + // because JavaScript considers the result from these operations signed.
|
| + return VisitBinop(node, rWord32, rWord32 | tInt32);
|
| + case IrOpcode::kWord32Equal:
|
| + return VisitBinop(node, rWord32, rBit);
|
| +
|
| + case IrOpcode::kInt32Add:
|
| + case IrOpcode::kInt32Sub:
|
| + case IrOpcode::kInt32Mul:
|
| + case IrOpcode::kInt32Div:
|
| + case IrOpcode::kInt32Mod:
|
| + return VisitInt32Binop(node);
|
| + case IrOpcode::kInt32UDiv:
|
| + case IrOpcode::kInt32UMod:
|
| + return VisitUint32Binop(node);
|
| + case IrOpcode::kInt32LessThan:
|
| + case IrOpcode::kInt32LessThanOrEqual:
|
| + return VisitInt32Cmp(node);
|
| +
|
| + case IrOpcode::kUint32LessThan:
|
| + case IrOpcode::kUint32LessThanOrEqual:
|
| + return VisitUint32Cmp(node);
|
| +
|
| + case IrOpcode::kInt64Add:
|
| + case IrOpcode::kInt64Sub:
|
| + case IrOpcode::kInt64Mul:
|
| + case IrOpcode::kInt64Div:
|
| + case IrOpcode::kInt64Mod:
|
| + return VisitInt64Binop(node);
|
| + case IrOpcode::kInt64LessThan:
|
| + case IrOpcode::kInt64LessThanOrEqual:
|
| + return VisitInt64Cmp(node);
|
| +
|
| + case IrOpcode::kInt64UDiv:
|
| + case IrOpcode::kInt64UMod:
|
| + return VisitUint64Binop(node);
|
| +
|
| + case IrOpcode::kWord64And:
|
| + case IrOpcode::kWord64Or:
|
| + case IrOpcode::kWord64Xor:
|
| + case IrOpcode::kWord64Shl:
|
| + case IrOpcode::kWord64Shr:
|
| + case IrOpcode::kWord64Sar:
|
| + return VisitBinop(node, rWord64, rWord64);
|
| + case IrOpcode::kWord64Equal:
|
| + return VisitBinop(node, rWord64, rBit);
|
| +
|
| + case IrOpcode::kConvertInt32ToInt64:
|
| + return VisitUnop(node, tInt32 | rWord32, tInt32 | rWord64);
|
| + case IrOpcode::kConvertInt64ToInt32:
|
| + return VisitUnop(node, tInt64 | rWord64, tInt32 | rWord32);
|
| +
|
| + case IrOpcode::kChangeInt32ToFloat64:
|
| + return VisitUnop(node, tInt32 | rWord32, tInt32 | rFloat64);
|
| + case IrOpcode::kChangeUint32ToFloat64:
|
| + return VisitUnop(node, tUint32 | rWord32, tUint32 | rFloat64);
|
| + case IrOpcode::kChangeFloat64ToInt32:
|
| + return VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
|
| + case IrOpcode::kChangeFloat64ToUint32:
|
| + return VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
|
| +
|
| + case IrOpcode::kFloat64Add:
|
| + case IrOpcode::kFloat64Sub:
|
| + case IrOpcode::kFloat64Mul:
|
| + case IrOpcode::kFloat64Div:
|
| + case IrOpcode::kFloat64Mod:
|
| + return VisitFloat64Binop(node);
|
| + case IrOpcode::kFloat64Equal:
|
| + case IrOpcode::kFloat64LessThan:
|
| + case IrOpcode::kFloat64LessThanOrEqual:
|
| + return VisitFloat64Cmp(node);
|
| + default:
|
| + VisitInputs(node);
|
| + break;
|
| + }
|
| + }
|
| +
|
| + void DeferReplacement(Node* node, Node* replacement) {
|
| + if (replacement->id() < count_) {
|
| + // Replace with a previously existing node eagerly.
|
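| + // Ids below {count_} belong to nodes that existed before this phase began, so they already have {NodeInfo} entries.
|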
| + node->ReplaceUses(replacement);
|
| + } else {
|
| + // Otherwise, we are replacing a node with a representation change.
|
| + // Such a substitution must be done after all lowering is done, because
|
| + // new nodes do not have {NodeInfo} entries, which would confuse
|
| + // representation change insertion for their uses.
|
| + replacements_.push_back(node);
|
| + replacements_.push_back(replacement);
|
| + }
|
| + // TODO(titzer) node->RemoveAllInputs(); // Node is now dead.
|
| + }
|
| +
|
| + void PrintUseInfo(Node* node) {
|
| + TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
|
| + PrintInfo(GetUseInfo(node));
|
| + TRACE(("\n"));
|
| + }
|
| +
|
| + void PrintInfo(RepTypeUnion info) {
|
| + if (FLAG_trace_representation) {
|
| + char buf[REP_TYPE_STRLEN];
|
| + RenderRepTypeUnion(buf, info);
|
| + TRACE(("%s", buf));
|
| + }
|
| + }
|
| +
|
| + private:
|
| + JSGraph* jsgraph_;
|
| + int count_; // number of nodes in the graph
|
| + NodeInfo* info_; // node id -> usage information
|
| + NodeVector nodes_; // collected nodes
|
| + NodeVector replacements_; // replacements to be done after lowering
|
| + bool contains_js_nodes_; // {true} if a JS operator was seen
|
| + Phase phase_; // current phase of algorithm
|
| + RepresentationChanger* changer_; // for inserting representation changes
|
| +
|
| + std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_;
|
| +
|
| + NodeInfo* GetInfo(Node* node) {
|
| + DCHECK(node->id() >= 0);
|
| + DCHECK(node->id() < count_);
|
| + return &info_[node->id()];
|
| + }
|
| +
|
| + RepTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
|
| +};
|
| +
|
| +
|
| Node* SimplifiedLowering::IsTagged(Node* node) {
|
| // TODO(titzer): factor this out to a TaggingScheme abstraction.
|
| STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
|
| @@ -20,6 +712,17 @@ Node* SimplifiedLowering::IsTagged(Node* node) {
|
| }
|
|
|
|
|
| +void SimplifiedLowering::LowerAllNodes() {
|
| + SimplifiedOperatorBuilder simplified(graph()->zone());
|
| + RepresentationChanger changer(jsgraph(), &simplified, machine(),
|
| + graph()->zone()->isolate());
|
| + RepresentationSelector selector(jsgraph(), zone(), &changer);
|
| + selector.Run(this);
|
| +
|
| + LoweringBuilder::LowerAllNodes();
|
| +}
|
| +
|
| +
|
| Node* SimplifiedLowering::Untag(Node* node) {
|
| // TODO(titzer): factor this out to a TaggingScheme abstraction.
|
| Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
|
| @@ -165,10 +868,8 @@ void SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
|
|
|
| void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
|
| Node* control) {
|
| - Node* val = node->InputAt(0);
|
| - Operator* op =
|
| - kPointerSize == 8 ? machine()->Word64Equal() : machine()->Word32Equal();
|
| - Node* cmp = graph()->NewNode(op, val, jsgraph()->TrueConstant());
|
| + Node* cmp = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
|
| + jsgraph()->TrueConstant());
|
| node->ReplaceUses(cmp);
|
| }
|
|
|
| @@ -204,7 +905,7 @@ static WriteBarrierKind ComputeWriteBarrierKind(
|
| }
|
|
|
|
|
| -void SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node* control) {
|
| +void SimplifiedLowering::DoLoadField(Node* node) {
|
| const FieldAccess& access = FieldAccessOf(node->op());
|
| node->set_op(machine_.Load(access.representation));
|
| Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
|
| @@ -212,7 +913,7 @@ void SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node* control) {
|
| }
|
|
|
|
|
| -void SimplifiedLowering::DoStoreField(Node* node, Node* effect, Node* control) {
|
| +void SimplifiedLowering::DoStoreField(Node* node) {
|
| const FieldAccess& access = FieldAccessOf(node->op());
|
| WriteBarrierKind kind = ComputeWriteBarrierKind(
|
| access.base_is_tagged, access.representation, access.type);
|
| @@ -252,21 +953,19 @@ Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
|
| }
|
| int fixed_offset = access.header_size - access.tag();
|
| if (fixed_offset == 0) return index;
|
| - return graph()->NewNode(machine()->Int32Add(),
|
| - jsgraph()->Int32Constant(fixed_offset), index);
|
| + return graph()->NewNode(machine()->Int32Add(), index,
|
| + jsgraph()->Int32Constant(fixed_offset));
|
| }
|
|
|
|
|
| -void SimplifiedLowering::DoLoadElement(Node* node, Node* effect,
|
| - Node* control) {
|
| +void SimplifiedLowering::DoLoadElement(Node* node) {
|
| const ElementAccess& access = ElementAccessOf(node->op());
|
| node->set_op(machine_.Load(access.representation));
|
| node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
|
| }
|
|
|
|
|
| -void SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
|
| - Node* control) {
|
| +void SimplifiedLowering::DoStoreElement(Node* node) {
|
| const ElementAccess& access = ElementAccessOf(node->op());
|
| WriteBarrierKind kind = ComputeWriteBarrierKind(
|
| access.base_is_tagged, access.representation, access.type);
|
| @@ -275,63 +974,37 @@ void SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
|
| }
|
|
|
|
|
| -void SimplifiedLowering::Lower(Node* node) {
|
| - Node* start = graph()->start();
|
| +void SimplifiedLowering::Lower(Node* node) {}
|
| +
|
| +
|
| +void SimplifiedLowering::LowerChange(Node* node, Node* effect, Node* control) {
|
| switch (node->opcode()) {
|
| - case IrOpcode::kBooleanNot:
|
| - case IrOpcode::kNumberEqual:
|
| - case IrOpcode::kNumberLessThan:
|
| - case IrOpcode::kNumberLessThanOrEqual:
|
| - case IrOpcode::kNumberAdd:
|
| - case IrOpcode::kNumberSubtract:
|
| - case IrOpcode::kNumberMultiply:
|
| - case IrOpcode::kNumberDivide:
|
| - case IrOpcode::kNumberModulus:
|
| - case IrOpcode::kNumberToInt32:
|
| - case IrOpcode::kNumberToUint32:
|
| - case IrOpcode::kReferenceEqual:
|
| - case IrOpcode::kStringEqual:
|
| - case IrOpcode::kStringLessThan:
|
| - case IrOpcode::kStringLessThanOrEqual:
|
| - case IrOpcode::kStringAdd:
|
| - break;
|
| case IrOpcode::kChangeTaggedToInt32:
|
| - DoChangeTaggedToUI32(node, start, start, true);
|
| + DoChangeTaggedToUI32(node, effect, control, true);
|
| break;
|
| case IrOpcode::kChangeTaggedToUint32:
|
| - DoChangeTaggedToUI32(node, start, start, false);
|
| + DoChangeTaggedToUI32(node, effect, control, false);
|
| break;
|
| case IrOpcode::kChangeTaggedToFloat64:
|
| - DoChangeTaggedToFloat64(node, start, start);
|
| + DoChangeTaggedToFloat64(node, effect, control);
|
| break;
|
| case IrOpcode::kChangeInt32ToTagged:
|
| - DoChangeUI32ToTagged(node, start, start, true);
|
| + DoChangeUI32ToTagged(node, effect, control, true);
|
| break;
|
| case IrOpcode::kChangeUint32ToTagged:
|
| - DoChangeUI32ToTagged(node, start, start, false);
|
| + DoChangeUI32ToTagged(node, effect, control, false);
|
| break;
|
| case IrOpcode::kChangeFloat64ToTagged:
|
| - DoChangeFloat64ToTagged(node, start, start);
|
| + DoChangeFloat64ToTagged(node, effect, control);
|
| break;
|
| case IrOpcode::kChangeBoolToBit:
|
| - DoChangeBoolToBit(node, start, start);
|
| + DoChangeBoolToBit(node, effect, control);
|
| break;
|
| case IrOpcode::kChangeBitToBool:
|
| - DoChangeBitToBool(node, start, start);
|
| - break;
|
| - case IrOpcode::kLoadField:
|
| - DoLoadField(node, start, start);
|
| - break;
|
| - case IrOpcode::kStoreField:
|
| - DoStoreField(node, start, start);
|
| - break;
|
| - case IrOpcode::kLoadElement:
|
| - DoLoadElement(node, start, start);
|
| - break;
|
| - case IrOpcode::kStoreElement:
|
| - DoStoreElement(node, start, start);
|
| + DoChangeBitToBool(node, effect, control);
|
| break;
|
| default:
|
| + UNREACHABLE();
|
| break;
|
| }
|
| }
|
|
|