Index: runtime/vm/intermediate_language_arm.cc
===================================================================
--- runtime/vm/intermediate_language_arm.cc (revision 24916)
+++ runtime/vm/intermediate_language_arm.cc (working copy)
@@ -728,8 +728,8 @@
                                     const LocationSummary& locs,
                                     Token::Kind kind,
                                     BranchInstr* branch) {
-  DRegister left = locs.in(0).fpu_reg();
-  DRegister right = locs.in(1).fpu_reg();
+  QRegister left = locs.in(0).fpu_reg();
+  QRegister right = locs.in(1).fpu_reg();

   Condition true_condition = TokenKindToDoubleCondition(kind);
   if (branch != NULL) {
@@ -1203,7 +1203,8 @@
   if ((representation() == kUnboxedDouble) ||
       (representation() == kUnboxedMint) ||
       (representation() == kUnboxedFloat32x4)) {
-    DRegister result = locs()->out().fpu_reg();
+    QRegister qresult = locs()->out().fpu_reg();
+    DRegister result = static_cast<DRegister>(qresult * 2);
     switch (class_id()) {
       case kTypedDataInt32ArrayCid:
         UNIMPLEMENTED();
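Aside (not part of the patch): the cast pattern above is the heart of this change. FPU locations are now allocated as Q registers, and static_cast<DRegister>(q * 2) recovers the D-register view, since on ARMv7 the NEON/VFP banks alias pairwise and Qn overlaps the pair D(2n+1):D(2n). A minimal sketch of how that idiom could be factored into a helper, assuming the VM's existing QRegister/DRegister enums; the helper name is hypothetical:

    // Sketch only: a helper capturing the Q -> D mapping used throughout this
    // patch. QRegister/DRegister are the VM's ARM register enums; the helper
    // name is illustrative, not taken from this change.
    static inline DRegister EvenDRegisterOf(QRegister q) {
      // Qn aliases the pair D(2n+1):D(2n); the low, even-numbered half is
      // where a scalar double held in Qn lives.
      return static_cast<DRegister>(q * 2);
    }
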
@@ -1470,17 +1471,21 @@
       }
       break;
     }
-    case kTypedDataFloat32ArrayCid:
+    case kTypedDataFloat32ArrayCid: {
+      DRegister in2 = static_cast<DRegister>(locs()->in(2).fpu_reg() * 2);
       // Convert to single precision.
-      __ vcvtsd(STMP, locs()->in(2).fpu_reg());
+      __ vcvtsd(STMP, in2);
       // Store.
       __ add(index.reg(), index.reg(), ShifterOperand(array));
       __ StoreSToOffset(STMP, index.reg(), 0);
       break;
-    case kTypedDataFloat64ArrayCid:
+    }
+    case kTypedDataFloat64ArrayCid: {
+      DRegister in2 = static_cast<DRegister>(locs()->in(2).fpu_reg() * 2);
       __ add(index.reg(), index.reg(), ShifterOperand(array));
-      __ StoreDToOffset(locs()->in(2).fpu_reg(), index.reg(), 0);
+      __ StoreDToOffset(in2, index.reg(), 0);
       break;
+    }
     case kTypedDataFloat32x4ArrayCid:
       UNIMPLEMENTED();
       break;
@@ -2465,7 +2470,8 @@
       __ cmp(right, ShifterOperand(0));
       __ b(deopt, EQ);
       Register temp = locs()->temp(0).reg();
-      DRegister dtemp = locs()->temp(1).fpu_reg();
+      QRegister qtemp = locs()->temp(1).fpu_reg();
+      DRegister dtemp = static_cast<DRegister>(qtemp * 2);
       __ Asr(temp, left, kSmiTagSize);  // SmiUntag left into temp.
       __ Asr(IP, right, kSmiTagSize);  // SmiUntag right into IP.

@@ -2604,8 +2610,9 @@
   BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
   compiler->AddSlowPathCode(slow_path);

-  Register out_reg = locs()->out().reg();
-  DRegister value = locs()->in(0).fpu_reg();
+  const Register out_reg = locs()->out().reg();
+  const QRegister qvalue = locs()->in(0).fpu_reg();
+  const DRegister value = static_cast<DRegister>(qvalue * 2);

   __ TryAllocate(compiler->double_class(),
                  slow_path->entry_label(),
@@ -2635,7 +2642,7 @@
 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const intptr_t value_cid = value()->Type()->ToCid();
   const Register value = locs()->in(0).reg();
-  const DRegister result = locs()->out().fpu_reg();
+  const DRegister result = static_cast<DRegister>(locs()->out().fpu_reg() * 2);

   if (value_cid == kDoubleCid) {
     __ LoadDFromOffset(result, value, Double::value_offset() - kHeapObjectTag);
@@ -2720,9 +2727,9 @@


 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  DRegister left = locs()->in(0).fpu_reg();
-  DRegister right = locs()->in(1).fpu_reg();
-  DRegister result = locs()->out().fpu_reg();
+  DRegister left = static_cast<DRegister>(locs()->in(0).fpu_reg() * 2);
+  DRegister right = static_cast<DRegister>(locs()->in(1).fpu_reg() * 2);
+  DRegister result = static_cast<DRegister>(locs()->out().fpu_reg() * 2);
   switch (op_kind()) {
     case Token::kADD: __ vaddd(result, left, right); break;
     case Token::kSUB: __ vsubd(result, left, right); break;
@@ -2953,7 +2960,9 @@


 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  __ vsqrtd(locs()->out().fpu_reg(), locs()->in(0).fpu_reg());
+  DRegister val = static_cast<DRegister>(locs()->in(0).fpu_reg() * 2);
+  DRegister result = static_cast<DRegister>(locs()->out().fpu_reg() * 2);
+  __ vsqrtd(result, val);
 }


@@ -3005,7 +3014,7 @@

 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register value = locs()->in(0).reg();
-  FpuRegister result = locs()->out().fpu_reg();
+  DRegister result = static_cast<DRegister>(locs()->out().fpu_reg() * 2);
   __ SmiUntag(value);
   __ vmovsr(STMP, value);
   __ vcvtdi(result, STMP);
@@ -3070,7 +3079,8 @@
 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi);
   Register result = locs()->out().reg();
-  DRegister value = locs()->in(0).fpu_reg();
+  QRegister qvalue = locs()->in(0).fpu_reg();
+  DRegister value = static_cast<DRegister>(qvalue * 2);
   __ vcvtid(STMP, value);
   __ vmovrs(result, STMP);
   // Check for overflow and that it fits into Smi.
@@ -3092,8 +3102,8 @@


 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  // DRegister value = locs()->in(0).fpu_reg();
-  // DRegister result = locs()->out().fpu_reg();
+  // QRegister value = locs()->in(0).fpu_reg();
+  // QRegister result = locs()->out().fpu_reg();
   switch (recognized_kind()) {
     case MethodRecognizer::kDoubleTruncate:
       UNIMPLEMENTED();
@@ -3118,11 +3128,11 @@
   const intptr_t kNumTemps = 0;
   LocationSummary* result =
       new LocationSummary(InputCount(), kNumTemps, LocationSummary::kCall);
-  result->set_in(0, Location::FpuRegisterLocation(D0));
+  result->set_in(0, Location::FpuRegisterLocation(Q0));
   if (InputCount() == 2) {
-    result->set_in(1, Location::FpuRegisterLocation(D1));
+    result->set_in(1, Location::FpuRegisterLocation(Q1));
   }
-  result->set_out(Location::FpuRegisterLocation(D0));
+  result->set_out(Location::FpuRegisterLocation(Q0));
   return result;
 }

@@ -3131,12 +3141,13 @@
   // For pow-function return NAN if exponent is NAN.
   Label do_call, skip_call;
   if (recognized_kind() == MethodRecognizer::kDoublePow) {
-    DRegister exp = locs()->in(1).fpu_reg();
+    DRegister exp = static_cast<DRegister>(locs()->in(1).fpu_reg() * 2);
+    DRegister result = static_cast<DRegister>(locs()->out().fpu_reg() * 2);
     __ vcmpd(exp, exp);
     __ vmstat();
     __ b(&do_call, VC);  // NaN -> false;
     // Exponent is NaN, return NaN.
-    __ vmovd(locs()->out().fpu_reg(), exp);
+    __ vmovd(result, exp);
     __ b(&skip_call);
   }
   __ Bind(&do_call);
@@ -3144,6 +3155,10 @@
   // ('gnueabi') float ABI for leaf runtime calls, i.e. double values
   // are passed and returned in vfp registers rather than in integer
   // register pairs.
+  if (InputCount() == 2) {
+    // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
+    __ vmovd(D1, D2);
+  }
   __ CallRuntime(TargetFunction());
   __ Bind(&skip_call);
 }
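
Aside (not part of the patch): the last two hunks interact. MakeLocationSummary now pins the inputs and the output to Q0 and Q1, while the hard-float ('gnueabi') leaf-call convention described in the comment passes and returns doubles in D0 and D1. Q0's low half is already D0, but Q1's low half is D2, hence the vmovd(D1, D2) before __ CallRuntime. A small standalone sketch of that aliasing, purely illustrative and independent of the VM sources:

    // Illustrative only; compiles with any C++ compiler.
    #include <cstdio>

    int LowDOfQ(int q)  { return 2 * q; }      // Qn's low double half is D(2n).
    int HighDOfQ(int q) { return 2 * q + 1; }  // Qn's high double half is D(2n+1).

    int main() {
      // First argument and result live in Q0, whose low half is D0: already
      // where the leaf-call ABI expects and returns a double.
      std::printf("Q0 -> D%d:D%d\n", HighDOfQ(0), LowDOfQ(0));
      // Second argument lives in Q1, whose low half is D2, so the patch moves
      // it into D1 with vmovd(D1, D2) before the call.
      std::printf("Q1 -> D%d:D%d\n", HighDOfQ(1), LowDOfQ(1));
      return 0;
    }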