| Index: src/x64/lithium-x64.h
|
| diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
|
| index ca7831c2e7f7c7f17ea7c56face430349fcf913e..d63647c8874eeccceb6f384709b8a2066f132927 100644
|
| --- a/src/x64/lithium-x64.h
|
| +++ b/src/x64/lithium-x64.h
|
| @@ -142,12 +142,21 @@ class LCodeGen;
|
| V(MathSin) \
|
| V(MathSqrt) \
|
| V(MathTan) \
|
| + V(NullarySIMDOperation) \
|
| + V(UnarySIMDOperation) \
|
| + V(BinarySIMDOperation) \
|
| + V(TernarySIMDOperation) \
|
| + V(QuarternarySIMDOperation) \
|
| V(ModI) \
|
| V(MulI) \
|
| V(NumberTagD) \
|
| + V(Float32x4ToTagged) \
|
| + V(Int32x4ToTagged) \
|
| V(NumberTagI) \
|
| V(NumberTagU) \
|
| V(NumberUntagD) \
|
| + V(TaggedToFloat32x4) \
|
| + V(TaggedToInt32x4) \
|
| V(OsrEntry) \
|
| V(OuterContext) \
|
| V(Parameter) \
|
| @@ -847,6 +856,154 @@ class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| };
|
|
|
|
|
| +class LNullarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 0, 0> {
|
| + public:
|
| + explicit LNullarySIMDOperation(BuiltinFunctionId op)
|
| + : op_(op) {
|
| + }
|
| +
|
| + BuiltinFunctionId op() const { return op_; }
|
| +
|
| + virtual Opcode opcode() const V8_OVERRIDE {
|
| + return LInstruction::kNullarySIMDOperation;
|
| + }
|
| + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
|
| + virtual const char* Mnemonic() const V8_OVERRIDE;
|
| + static LNullarySIMDOperation* cast(LInstruction* instr) {
|
| + ASSERT(instr->IsNullarySIMDOperation());
|
| + return reinterpret_cast<LNullarySIMDOperation*>(instr);
|
| + }
|
| +
|
| + DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
|
| +
|
| + private:
|
| + BuiltinFunctionId op_;
|
| +};
|
| +
|
| +
|
| +class LUnarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| + public:
|
| + LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
|
| + : op_(op) {
|
| + inputs_[0] = value;
|
| + }
|
| +
|
| + LOperand* value() { return inputs_[0]; }
|
| + BuiltinFunctionId op() const { return op_; }
|
| +
|
| + virtual Opcode opcode() const V8_OVERRIDE {
|
| + return LInstruction::kUnarySIMDOperation;
|
| + }
|
| + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
|
| + virtual const char* Mnemonic() const V8_OVERRIDE;
|
| + static LUnarySIMDOperation* cast(LInstruction* instr) {
|
| + ASSERT(instr->IsUnarySIMDOperation());
|
| + return reinterpret_cast<LUnarySIMDOperation*>(instr);
|
| + }
|
| +
|
| + DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
|
| +
|
| + private:
|
| + BuiltinFunctionId op_;
|
| +};
|
| +
|
| +
|
| +class LBinarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 2, 0> {
|
| + public:
|
| + LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
|
| + : op_(op) {
|
| + inputs_[0] = left;
|
| + inputs_[1] = right;
|
| + }
|
| +
|
| + LOperand* left() { return inputs_[0]; }
|
| + LOperand* right() { return inputs_[1]; }
|
| + BuiltinFunctionId op() const { return op_; }
|
| +
|
| + virtual Opcode opcode() const V8_OVERRIDE {
|
| + return LInstruction::kBinarySIMDOperation;
|
| + }
|
| + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
|
| + virtual const char* Mnemonic() const V8_OVERRIDE;
|
| + static LBinarySIMDOperation* cast(LInstruction* instr) {
|
| + ASSERT(instr->IsBinarySIMDOperation());
|
| + return reinterpret_cast<LBinarySIMDOperation*>(instr);
|
| + }
|
| +
|
| + DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
|
| +
|
| + private:
|
| + BuiltinFunctionId op_;
|
| +};
|
| +
|
| +
|
| +class LTernarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 3, 0> {
|
| + public:
|
| + LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
|
| + BuiltinFunctionId op)
|
| + : op_(op) {
|
| + inputs_[0] = first;
|
| + inputs_[1] = second;
|
| + inputs_[2] = third;
|
| + }
|
| +
|
| + LOperand* first() { return inputs_[0]; }
|
| + LOperand* second() { return inputs_[1]; }
|
| + LOperand* third() { return inputs_[2]; }
|
| + BuiltinFunctionId op() const { return op_; }
|
| +
|
| + virtual Opcode opcode() const V8_OVERRIDE {
|
| + return LInstruction::kTernarySIMDOperation;
|
| + }
|
| + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
|
| + virtual const char* Mnemonic() const V8_OVERRIDE;
|
| + static LTernarySIMDOperation* cast(LInstruction* instr) {
|
| + ASSERT(instr->IsTernarySIMDOperation());
|
| + return reinterpret_cast<LTernarySIMDOperation*>(instr);
|
| + }
|
| +
|
| + DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
|
| +
|
| + private:
|
| + BuiltinFunctionId op_;
|
| +};
|
| +
|
| +
|
| +class LQuarternarySIMDOperation V8_FINAL
|
| + : public LTemplateInstruction<1, 4, 0> {
|
| + public:
|
| + LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
|
| + LOperand* w, BuiltinFunctionId op)
|
| + : op_(op) {
|
| + inputs_[0] = x;
|
| + inputs_[1] = y;
|
| + inputs_[2] = z;
|
| + inputs_[3] = w;
|
| + }
|
| +
|
| + LOperand* x() { return inputs_[0]; }
|
| + LOperand* y() { return inputs_[1]; }
|
| + LOperand* z() { return inputs_[2]; }
|
| + LOperand* w() { return inputs_[3]; }
|
| + BuiltinFunctionId op() const { return op_; }
|
| +
|
| + virtual Opcode opcode() const V8_OVERRIDE {
|
| + return LInstruction::kQuarternarySIMDOperation;
|
| + }
|
| + virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
|
| + virtual const char* Mnemonic() const V8_OVERRIDE;
|
| + static LQuarternarySIMDOperation* cast(LInstruction* instr) {
|
| + ASSERT(instr->IsQuarternarySIMDOperation());
|
| + return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
|
| + }
|
| +
|
| + DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
|
| +
|
| + private:
|
| + BuiltinFunctionId op_;
|
| +};
|
| +
|
| +
|
| class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
|
| public:
|
| LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
|
| @@ -2090,6 +2247,36 @@ class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
|
| };
|
|
|
|
|
| +class LFloat32x4ToTagged V8_FINAL : public LTemplateInstruction<1, 1, 1> {
|
| + public:
|
| + LFloat32x4ToTagged(LOperand* value, LOperand* temp) {
|
| + inputs_[0] = value;
|
| + temps_[0] = temp;
|
| + }
|
| +
|
| + LOperand* value() { return inputs_[0]; }
|
| + LOperand* temp() { return temps_[0]; }
|
| +
|
| + DECLARE_CONCRETE_INSTRUCTION(Float32x4ToTagged, "float32x4-tag")
|
| + DECLARE_HYDROGEN_ACCESSOR(Change)
|
| +};
|
| +
|
| +
|
| +class LInt32x4ToTagged V8_FINAL : public LTemplateInstruction<1, 1, 1> {
|
| + public:
|
| + LInt32x4ToTagged(LOperand* value, LOperand* temp) {
|
| + inputs_[0] = value;
|
| + temps_[0] = temp;
|
| + }
|
| +
|
| + LOperand* value() { return inputs_[0]; }
|
| + LOperand* temp() { return temps_[0]; }
|
| +
|
| + DECLARE_CONCRETE_INSTRUCTION(Int32x4ToTagged, "int32x4-tag")
|
| + DECLARE_HYDROGEN_ACCESSOR(Change)
|
| +};
|
| +
|
| +
|
| // Sometimes truncating conversion from a tagged value to an int32.
|
| class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| public:
|
| @@ -2162,6 +2349,32 @@ class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| };
|
|
|
|
|
| +class LTaggedToFloat32x4 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| + public:
|
| + explicit LTaggedToFloat32x4(LOperand* value) {
|
| + inputs_[0] = value;
|
| + }
|
| +
|
| + LOperand* value() { return inputs_[0]; }
|
| +
|
| + DECLARE_CONCRETE_INSTRUCTION(TaggedToFloat32x4, "float32x4-untag")
|
| + DECLARE_HYDROGEN_ACCESSOR(Change)
|
| +};
|
| +
|
| +
|
| +class LTaggedToInt32x4 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| + public:
|
| + explicit LTaggedToInt32x4(LOperand* value) {
|
| + inputs_[0] = value;
|
| + }
|
| +
|
| + LOperand* value() { return inputs_[0]; }
|
| +
|
| + DECLARE_CONCRETE_INSTRUCTION(TaggedToInt32x4, "int32x4-untag")
|
| + DECLARE_HYDROGEN_ACCESSOR(Change)
|
| +};
|
| +
|
| +
|
| class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
|
| public:
|
| LSmiUntag(LOperand* value, bool needs_check)
|
|
|