Index: runtime/vm/intermediate_language_ia32.cc
diff --git a/runtime/vm/intermediate_language_ia32.cc b/runtime/vm/intermediate_language_ia32.cc
index d3cf96f2d48b2e25499580adf751714da298d901..828bbd2537b523d6c829415fed40aad93fe4e36d 100644
--- a/runtime/vm/intermediate_language_ia32.cc
+++ b/runtime/vm/intermediate_language_ia32.cc
@@ -175,15 +175,25 @@ LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   // The register allocator drops constant definitions that have no uses.
   if (!locs()->out(0).IsInvalid()) {
-    XmmRegister result = locs()->out(0).fpu_reg();
-    if (constant_address() == 0) {
-      Register boxed = locs()->temp(0).reg();
-      __ LoadObjectSafely(boxed, value());
-      __ movsd(result, FieldAddress(boxed, Double::value_offset()));
-    } else if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
-      __ xorps(result, result);
-    } else {
-      __ movsd(result, Address::Absolute(constant_address()));
+    switch (representation()) {
+      case kUnboxedDouble: {
+        XmmRegister result = locs()->out(0).fpu_reg();
+        if (constant_address() == 0) {
+          Register boxed = locs()->temp(0).reg();
+          __ LoadObjectSafely(boxed, value());
+          __ movsd(result, FieldAddress(boxed, Double::value_offset()));
+        } else if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
+          __ xorps(result, result);
+        } else {
+          __ movsd(result, Address::Absolute(constant_address()));
+        }
+        break;
+      }
+      case kUnboxedInt32:
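+        // The Smi constant's untagged value is loaded as a 32-bit immediate.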
+        __ movl(locs()->out(0).reg(), Immediate(Smi::Cast(value()).Value()));
+        break;
+      default:
+        UNREACHABLE();
     }
   }
 }
@@ -1042,7 +1052,7 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Isolate* isolate,
   locs->set_in(0, Location::RequiresRegister());
   if (CanBeImmediateIndex(index(), class_id())) {
     // CanBeImmediateIndex must return false for unsafe smis.
-    locs->set_in(1, Location::Constant(index()->BoundConstant()));
+    locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
   } else {
     // The index is either untagged (element size == 1) or a smi (for all
     // element sizes > 1).
@@ -1238,7 +1248,7 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Isolate* isolate,
   locs->set_in(0, Location::RequiresRegister());
   if (CanBeImmediateIndex(index(), class_id())) {
     // CanBeImmediateIndex must return false for unsafe smis.
-    locs->set_in(1, Location::Constant(index()->BoundConstant()));
+    locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
   } else {
     // The index is either untagged (element size == 1) or a smi (for all
     // element sizes > 1).
@@ -2775,7 +2785,7 @@ LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Isolate* isolate,
       summary->set_in(0, Location::RequiresRegister());
       ConstantInstr* right_constant = right()->definition()->AsConstant();
       // The programmer only controls one bit, so the constant is safe.
-      summary->set_in(1, Location::Constant(right_constant->value()));
+      summary->set_in(1, Location::Constant(right_constant));
       summary->set_temp(0, Location::RequiresRegister());
       summary->set_out(0, Location::SameAsFirstInput());
     } else {
@@ -3139,6 +3149,289 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 }


+LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Isolate* isolate,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  if (op_kind() == Token::kTRUNCDIV) {
+    UNREACHABLE();
+    return NULL;
+  } else if (op_kind() == Token::kMOD) {
+    UNREACHABLE();
+    return NULL;
+  } else if (op_kind() == Token::kSHR) {
+    const intptr_t kNumTemps = 0;
+    LocationSummary* summary = new(isolate) LocationSummary(
+        isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
+    summary->set_out(0, Location::SameAsFirstInput());
+    return summary;
+  } else if (op_kind() == Token::kSHL) {
+    const intptr_t kNumTemps = !IsTruncating() ? 1 : 0;
+    LocationSummary* summary = new(isolate) LocationSummary(
+        isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
+    if (!IsTruncating()) {
+      summary->set_temp(0, Location::RequiresRegister());
+    }
+    summary->set_out(0, Location::SameAsFirstInput());
+    return summary;
+  } else {
+    const intptr_t kNumTemps = 0;
+    LocationSummary* summary = new(isolate) LocationSummary(
+        isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+    summary->set_in(0, Location::RequiresRegister());
+    ConstantInstr* constant = right()->definition()->AsConstant();
+    if (constant != NULL) {
+      summary->set_in(1, Location::RegisterOrSmiConstant(right()));
+    } else {
+      summary->set_in(1, Location::PrefersRegister());
+    }
+    summary->set_out(0, Location::SameAsFirstInput());
+    return summary;
+  }
+}
+
+
+static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
+                               BinaryInt32OpInstr* shift_left) {
+  const bool is_truncating = shift_left->IsTruncating();
+  const LocationSummary& locs = *shift_left->locs();
+  Register left = locs.in(0).reg();
+  Register result = locs.out(0).reg();
+  ASSERT(left == result);
+  Label* deopt = shift_left->CanDeoptimize() ?
+      compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
+      : NULL;
+  ASSERT(locs.in(1).IsConstant());
+
+  const Object& constant = locs.in(1).constant();
+  ASSERT(constant.IsSmi());
+  // shll operation masks the count to 5 bits.
+  const intptr_t kCountLimit = 0x1F;
+  const intptr_t value = Smi::Cast(constant).Value();
+  if (value == 0) {
+    // No code needed.
+  } else if ((value < 0) || (value >= kCountLimit)) {
+    // This condition may not be known earlier in some cases because
+    // of constant propagation, inlining, etc.
+    if ((value >= kCountLimit) && is_truncating) {
+      __ xorl(result, result);
+    } else {
+      // Result is Mint or exception.
+      __ jmp(deopt);
+    }
+  } else {
+    if (!is_truncating) {
+      // Check for overflow.
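+      // Shift left, then shift back; a changed value means overflow.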
+      Register temp = locs.temp(0).reg();
+      __ movl(temp, left);
+      __ shll(left, Immediate(value));
+      __ sarl(left, Immediate(value));
+      __ cmpl(left, temp);
+      __ j(NOT_EQUAL, deopt);  // Overflow.
+    }
+    // Shift for result now we know there is no overflow.
+    __ shll(left, Immediate(value));
+  }
+}
+
+
+void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (op_kind() == Token::kSHL) {
+    EmitInt32ShiftLeft(compiler, this);
+    return;
+  }
+
+  Register left = locs()->in(0).reg();
+  Register result = locs()->out(0).reg();
+  ASSERT(left == result);
+  Label* deopt = NULL;
+  if (CanDeoptimize()) {
+    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
+  }
+
+  if (locs()->in(1).IsConstant()) {
+    const Object& constant = locs()->in(1).constant();
+    ASSERT(constant.IsSmi());
+    const intptr_t value = Smi::Cast(constant).Value();
+    switch (op_kind()) {
+      case Token::kADD:
+        if (value != 0) {
+          // Checking overflow without emitting an instruction would be wrong.
+          __ addl(left, Immediate(value));
+          if (deopt != NULL) __ j(OVERFLOW, deopt);
+        }
+        break;
+      case Token::kSUB: {
+        if (value != 0) {
+          // Checking overflow without emitting an instruction would be wrong.
+          __ subl(left, Immediate(value));
+          if (deopt != NULL) __ j(OVERFLOW, deopt);
+        }
+        break;
+      }
+      case Token::kMUL: {
+        if (value == 2) {
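+          // Multiply by two as a shift; shll by 1 sets OF on overflow.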
+          __ shll(left, Immediate(1));
+        } else {
+          __ imull(left, Immediate(value));
+        }
+        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        break;
+      }
+      case Token::kTRUNCDIV: {
+        UNREACHABLE();
+        break;
+      }
+      case Token::kBIT_AND: {
+        // No overflow check.
+        __ andl(left, Immediate(value));
+        break;
+      }
+      case Token::kBIT_OR: {
+        // No overflow check.
+        __ orl(left, Immediate(value));
+        break;
+      }
+      case Token::kBIT_XOR: {
+        // No overflow check.
+        __ xorl(left, Immediate(value));
+        break;
+      }
+      case Token::kSHR: {
+        // sarl operation masks the count to 5 bits.
+        const intptr_t kCountLimit = 0x1F;
+        if (value == 0) {
+          // TODO(vegorov): should be handled outside.
+          break;
+        } else if (value < 0) {
+          // TODO(vegorov): should be handled outside.
+          __ jmp(deopt);
+          break;
+        }
+
+        if (value >= kCountLimit) {
+          __ sarl(left, Immediate(kCountLimit));
+        } else {
+          __ sarl(left, Immediate(value));
+        }
+
+        break;
+      }
+
+      default:
+        UNREACHABLE();
+        break;
+    }
+    return;
+  }  // if locs()->in(1).IsConstant()
+
+  if (locs()->in(1).IsStackSlot()) {
+    const Address& right = locs()->in(1).ToStackSlotAddress();
+    switch (op_kind()) {
+      case Token::kADD: {
+        __ addl(left, right);
+        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        break;
+      }
+      case Token::kSUB: {
+        __ subl(left, right);
+        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        break;
+      }
+      case Token::kMUL: {
+        __ imull(left, right);
+        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        break;
+      }
+      case Token::kBIT_AND: {
+        // No overflow check.
+        __ andl(left, right);
+        break;
+      }
+      case Token::kBIT_OR: {
+        // No overflow check.
+        __ orl(left, right);
+        break;
+      }
+      case Token::kBIT_XOR: {
+        // No overflow check.
+        __ xorl(left, right);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    return;
+  }  // if locs()->in(1).IsStackSlot.
+
+  // if locs()->in(1).IsRegister.
+  Register right = locs()->in(1).reg();
+  switch (op_kind()) {
+    case Token::kADD: {
+      __ addl(left, right);
+      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      break;
+    }
+    case Token::kSUB: {
+      __ subl(left, right);
+      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      break;
+    }
+    case Token::kMUL: {
+      __ imull(left, right);
+      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      break;
+    }
+    case Token::kBIT_AND: {
+      // No overflow check.
+      __ andl(left, right);
+      break;
+    }
+    case Token::kBIT_OR: {
+      // No overflow check.
+      __ orl(left, right);
+      break;
+    }
+    case Token::kBIT_XOR: {
+      // No overflow check.
+      __ xorl(left, right);
+      break;
+    }
+    case Token::kTRUNCDIV: {
+      UNREACHABLE();
+      break;
+    }
+    case Token::kMOD: {
+      UNREACHABLE();
+      break;
+    }
+    case Token::kSHR: {
+      UNREACHABLE();
+      break;
+    }
+    case Token::kDIV: {
+      // Dispatches to 'Double./'.
+      // TODO(srdjan): Implement as conversion to double and double division.
+      UNREACHABLE();
+      break;
+    }
+    case Token::kOR:
+    case Token::kAND: {
+      // Flow graph builder has dissected this operation to guarantee correct
+      // behavior (short-circuit evaluation).
+      UNREACHABLE();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Isolate* isolate,
                                                              bool opt) const {
   intptr_t left_cid = left()->Type()->ToCid();
@@ -4673,6 +4966,25 @@ void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 }


+LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Isolate* isolate,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* result = new(isolate) LocationSummary(
+      isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  result->set_in(0, Location::WritableRegister());
+  result->set_out(0, Location::RequiresFpuRegister());
+  return result;
+}
+
+
+void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register value = locs()->in(0).reg();
+  FpuRegister result = locs()->out(0).fpu_reg();
+  __ cvtsi2sd(result, value);
+}
+
+
 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Isolate* isolate,
                                                        bool opt) const {
   const intptr_t kNumInputs = 1;
@@ -5910,16 +6222,6 @@ CompileType UnaryUint32OpInstr::ComputeType() const {
 }


-CompileType BoxUint32Instr::ComputeType() const {
-  return CompileType::Int();
-}
-
-
-CompileType UnboxUint32Instr::ComputeType() const {
-  return CompileType::Int();
-}
-
-
 LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Isolate* isolate,
                                                           bool opt) const {
   const intptr_t kNumInputs = 2;
@@ -6092,9 +6394,14 @@ LocationSummary* BoxUint32Instr::MakeLocationSummary(Isolate* isolate,
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary = new(isolate) LocationSummary(
-      isolate, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+      isolate,
+      kNumInputs,
+      kNumTemps,
+      ValueFitsSmi() ? LocationSummary::kNoCall
+                     : LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresRegister());
-  summary->set_out(0, Location::RequiresRegister());
+  summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
+                                     : Location::RequiresRegister());
   return summary;
 }

@@ -6102,27 +6409,68 @@ LocationSummary* BoxUint32Instr::MakeLocationSummary(Isolate* isolate,
 void BoxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register value = locs()->in(0).reg();
   Register out = locs()->out(0).reg();
-  ASSERT(value != out);

   Label not_smi, done;

-  // TODO(johnmccutchan): Use range information to fast path smi / mint boxing.
-  // Test if this value is <= kSmiMax.
-  __ cmpl(value, Immediate(kSmiMax));
-  __ j(ABOVE, &not_smi);
-  // Smi.
-  __ movl(out, value);
-  __ SmiTag(out);
-  __ jmp(&done);
-  __ Bind(&not_smi);
-  // Allocate a mint.
-  BoxAllocationSlowPath::Allocate(
-      compiler, this, compiler->mint_class(), out, kNoRegister);
-  // Copy low word into mint.
-  __ movl(FieldAddress(out, Mint::value_offset()), value);
-  // Zero high word.
-  __ movl(FieldAddress(out, Mint::value_offset() + kWordSize), Immediate(0));
-  __ Bind(&done);
+  if (ValueFitsSmi()) {
+    ASSERT(value == out);
+    __ SmiTag(value);
+  } else {
+    ASSERT(value != out);
+    __ cmpl(value, Immediate(kSmiMax));
+    __ j(ABOVE, &not_smi);
+    // Smi.
+    __ movl(out, value);
+    __ SmiTag(out);
+    __ jmp(&done);
+    __ Bind(&not_smi);
+    // Allocate a mint.
+    BoxAllocationSlowPath::Allocate(
+        compiler, this, compiler->mint_class(), out, kNoRegister);
+    // Copy low word into mint.
+    __ movl(FieldAddress(out, Mint::value_offset()), value);
+    // Zero high word.
+    __ movl(FieldAddress(out, Mint::value_offset() + kWordSize), Immediate(0));
+    __ Bind(&done);
+  }
+}
+
+
+LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
+                                                    bool opt) const {
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new(isolate) LocationSummary(
+      isolate, kNumInputs, kNumTemps,
+      ValueFitsSmi() ? LocationSummary::kNoCall
+                     : LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, ValueFitsSmi() ? Location::RequiresRegister()
+                                    : Location::WritableRegister());
+  summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
+                                     : Location::RequiresRegister());
+  return summary;
+}
+
+
+void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register value = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  if (out != value) {
+    __ movl(out, value);
+  }
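+  // Shifting left by one Smi-tags the value; OF is set if it does not fit.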
+  __ shll(out, Immediate(1));
+  if (!ValueFitsSmi()) {
+    Label done;
+    __ j(NO_OVERFLOW, &done);
+    // Allocate a mint.
+    BoxAllocationSlowPath::Allocate(
+        compiler, this, compiler->mint_class(), out, kNoRegister);
+    __ movl(FieldAddress(out, Mint::value_offset()), value);
+    __ sarl(value, Immediate(31));  // Sign extend.
+    __ movl(FieldAddress(out, Mint::value_offset() + kWordSize), value);
+    __ Bind(&done);
+  }
 }

@@ -6171,35 +6519,142 @@ void UnboxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
 }


+LocationSummary* UnboxInt32Instr::MakeLocationSummary(Isolate* isolate,
+                                                      bool opt) const {
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
+  LocationSummary* summary = new(isolate) LocationSummary(
+      isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  if (kNumTemps > 0) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
+  summary->set_out(0, (value_cid == kSmiCid) ? Location::SameAsFirstInput()
+                                             : Location::RequiresRegister());
+  return summary;
+}
+
+
+static void LoadInt32FromMint(FlowGraphCompiler* compiler,
+                              Register mint,
+                              Register result,
+                              Register temp,
+                              Label* deopt) {
+  __ movl(result, FieldAddress(mint, Mint::value_offset()));
+  if (deopt != NULL) {
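+    // Deopt unless the high word is the sign extension of the low word.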
+    __ movl(temp, result);
+    __ sarl(temp, Immediate(31));
+    __ cmpl(temp, FieldAddress(mint, Mint::value_offset() + kWordSize));
+    __ j(NOT_EQUAL, deopt);
+  }
+}
+
+
+void UnboxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const intptr_t value_cid = value()->Type()->ToCid();
+  const Register value = locs()->in(0).reg();
+  const Register result = locs()->out(0).reg();
+
+  // TODO(johnmccutchan): Emit better code for constant inputs.
+  if (value_cid == kMintCid) {
+    Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
+    Label* deopt = CanDeoptimize() ?
+        compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
+    LoadInt32FromMint(compiler,
+                      value,
+                      result,
+                      temp,
+                      deopt);
+  } else if (value_cid == kSmiCid) {
+    ASSERT(value == result);
+    __ SmiUntag(value);
+  } else {
+    Register temp = locs()->temp(0).reg();
+    Label* deopt = compiler->AddDeoptStub(deopt_id_,
+                                          ICData::kDeoptUnboxInteger);
+    Label is_smi, done;
+    __ testl(value, Immediate(kSmiTagMask));
+    __ j(ZERO, &is_smi);
+    __ CompareClassId(value, kMintCid, temp);
+    __ j(NOT_EQUAL, deopt);
+    LoadInt32FromMint(compiler,
+                      value,
+                      result,
+                      temp,
+                      deopt);
+    __ movl(value, FieldAddress(value, Mint::value_offset()));
+    __ jmp(&done);
+    __ Bind(&is_smi);
+    __ SmiUntag(value);
+    __ Bind(&done);
+  }
+}
+
+
 LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate,
                                                                bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary = new(isolate) LocationSummary(
       isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  if (from() == kUnboxedMint) {
-    summary->set_in(0, Location::Pair(Location::RequiresRegister(),
-                                      Location::RequiresRegister()));
+  if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) &&
+      (to() == kUnboxedInt32 || to() == kUnboxedUint32)) {
+    summary->set_in(0, Location::RequiresRegister());
+    summary->set_out(0, Location::SameAsFirstInput());
+  } else if (from() == kUnboxedMint) {
+    summary->set_in(0, Location::Pair(
+        CanDeoptimize() ? Location::WritableRegister()
+                        : Location::RequiresRegister(),
+        Location::RequiresRegister()));
     summary->set_out(0, Location::RequiresRegister());
-  } else {
-    ASSERT(from() == kUnboxedUint32);
+  } else if (from() == kUnboxedUint32) {
    summary->set_in(0, Location::RequiresRegister());
    summary->set_out(0, Location::Pair(Location::RequiresRegister(),
                                       Location::RequiresRegister()));
+  } else if (from() == kUnboxedInt32) {
+    summary->set_in(0, Location::RegisterLocation(EAX));
+    summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
+                                       Location::RegisterLocation(EDX)));
   }
   return summary;
 }


 void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  if (from() == kUnboxedMint) {
+  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
+    // Representations are bitwise equivalent.
+    ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
+  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
+    // Representations are bitwise equivalent.
+    ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
+    if (CanDeoptimize()) {
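+      // A uint32 value with the sign bit set does not fit in int32; deopt.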
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
+      __ testl(locs()->out(0).reg(), locs()->out(0).reg());
+      __ j(NEGATIVE, deopt);
+    }
+  } else if (from() == kUnboxedMint) {
+    // TODO(vegorov) kUnboxedMint -> kInt32 conversion is currently usually
+    // dominated by a CheckSmi(BoxInteger(val)) which is an artifact of ordering
+    // of optimization passes and the way we check smi-ness of values.
+    // Optimize it away.
+    ASSERT(to() == kUnboxedInt32 || to() == kUnboxedUint32);
     PairLocation* in_pair = locs()->in(0).AsPairLocation();
     Register in_lo = in_pair->At(0).reg();
+    Register in_hi = in_pair->At(1).reg();
     Register out = locs()->out(0).reg();
     // Copy low word.
     __ movl(out, in_lo);
-  } else {
-    ASSERT(from() == kUnboxedUint32);
+    if (CanDeoptimize()) {
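+      // Deopt if the high word is not the sign extension of the low word.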
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
+      __ sarl(in_lo, Immediate(31));
+      __ cmpl(in_lo, in_hi);
+      __ j(NOT_EQUAL, deopt);
+    }
+  } else if (from() == kUnboxedUint32) {
+    ASSERT(to() == kUnboxedMint);
     Register in = locs()->in(0).reg();
     PairLocation* out_pair = locs()->out(0).AsPairLocation();
     Register out_lo = out_pair->At(0).reg();
@@ -6208,6 +6663,16 @@ void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ movl(out_lo, in);
     // Zero upper word.
     __ xorl(out_hi, out_hi);
+  } else if (from() == kUnboxedInt32) {
+    ASSERT(to() == kUnboxedMint);
+    PairLocation* out_pair = locs()->out(0).AsPairLocation();
+    Register out_lo = out_pair->At(0).reg();
+    Register out_hi = out_pair->At(1).reg();
+    ASSERT(locs()->in(0).reg() == EAX);
+    ASSERT(out_lo == EAX && out_hi == EDX);
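+    // cdq sign-extends EAX into EDX:EAX.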
+    __ cdq();
+  } else {
+    UNREACHABLE();
   }
 }
