Chromium Code Reviews
| Index: runtime/vm/flow_graph_compiler_x64.cc |
| =================================================================== |
| --- runtime/vm/flow_graph_compiler_x64.cc (revision 17107) |
| +++ runtime/vm/flow_graph_compiler_x64.cc (working copy) |
| @@ -1,4 +1,4 @@ |
| -// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| +// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| @@ -1295,7 +1295,7 @@ |
| } |
| -void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result, |
| +void FlowGraphCompiler::LoadDoubleOrSmiToFpu(XmmRegister result, |
Ivan Posva 2013/01/16 01:17:56: ditto
regis 2013/01/16 01:55:07: Done.
| Register reg, |
| Register temp, |
| Label* not_double_or_smi) { |
| @@ -1316,7 +1316,7 @@ |
| void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { |
| // TODO(vegorov): consider saving only caller save (volatile) registers. |
| - const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count(); |
| + const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count(); |
| if (xmm_regs_count > 0) { |
| __ subq(RSP, Immediate(xmm_regs_count * kDoubleSize)); |
| // Store XMM registers with the lowest register number at the lowest |
| @@ -1324,7 +1324,7 @@ |
| intptr_t offset = 0; |
| for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) { |
| XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx); |
| - if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) { |
| + if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) { |
| __ movsd(Address(RSP, offset), xmm_reg); |
| offset += kDoubleSize; |
| } |
| @@ -1353,13 +1353,13 @@ |
| } |
| } |
| - const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count(); |
| + const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count(); |
| if (xmm_regs_count > 0) { |
| // XMM registers have the lowest register number at the lowest address. |
| intptr_t offset = 0; |
| for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) { |
| XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx); |
| - if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) { |
| + if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) { |
| __ movsd(xmm_reg, Address(RSP, offset)); |
| offset += kDoubleSize; |
| } |
| @@ -1370,6 +1370,199 @@ |
| } |
| +struct CidTarget { |
| + intptr_t cid; |
| + Function* target; |
| + intptr_t count; |
| + CidTarget(intptr_t cid_arg, |
| + Function* target_arg, |
| + intptr_t count_arg) |
| + : cid(cid_arg), target(target_arg), count(count_arg) {} |
| +}; |
| + |
| + |
| +// Returns 'sorted' array in decreasing count order. |
| +// The expected number of elements to sort is less than 10. |
| +static void SortICDataByCount(const ICData& ic_data, |
| + GrowableArray<CidTarget>* sorted) { |
| + ASSERT(ic_data.num_args_tested() == 1); |
| + const intptr_t len = ic_data.NumberOfChecks(); |
| + sorted->Clear(); |
| + |
| + for (int i = 0; i < len; i++) { |
| + sorted->Add(CidTarget(ic_data.GetReceiverClassIdAt(i), |
| + &Function::ZoneHandle(ic_data.GetTargetAt(i)), |
| + ic_data.GetCountAt(i))); |
| + } |
| + for (int i = 0; i < len; i++) { |
| + intptr_t largest_ix = i; |
| + for (int k = i + 1; k < len; k++) { |
| + if ((*sorted)[largest_ix].count < (*sorted)[k].count) { |
| + largest_ix = k; |
| + } |
| + } |
| + if (i != largest_ix) { |
| + // Swap. |
| + CidTarget temp = (*sorted)[i]; |
| + (*sorted)[i] = (*sorted)[largest_ix]; |
| + (*sorted)[largest_ix] = temp; |
| + } |
| + } |
| +} |
| + |
| + |
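SortICDataByCount is a plain selection sort over the (cid, target, count) triples, ordered by descending call count; with fewer than ten checks per call site an O(n^2) pass is simpler than a general sort and keeps the hottest receiver class first. A minimal standalone sketch of the same idea, using a hypothetical CheckEntry type instead of the VM's CidTarget (illustration only, not part of the patch):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for the (cid, target, count) triple.
    struct CheckEntry {
      intptr_t cid;
      intptr_t count;
    };

    // Same selection-sort-by-descending-count shape as SortICDataByCount:
    // find the largest remaining count and swap it into position i.
    static void SortByCountDescending(std::vector<CheckEntry>* sorted) {
      const intptr_t len = static_cast<intptr_t>(sorted->size());
      for (intptr_t i = 0; i < len; i++) {
        intptr_t largest_ix = i;
        for (intptr_t k = i + 1; k < len; k++) {
          if ((*sorted)[largest_ix].count < (*sorted)[k].count) {
            largest_ix = k;
          }
        }
        if (i != largest_ix) {
          std::swap((*sorted)[i], (*sorted)[largest_ix]);
        }
      }
    }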
| +void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data, |
| + Register class_id_reg, |
| + intptr_t arg_count, |
| + const Array& arg_names, |
| + Label* deopt, |
| + intptr_t deopt_id, |
| + intptr_t token_index, |
| + LocationSummary* locs) { |
| + ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0)); |
| + Label match_found; |
| + const intptr_t len = ic_data.NumberOfChecks(); |
| + GrowableArray<CidTarget> sorted(len); |
| + SortICDataByCount(ic_data, &sorted); |
| + for (intptr_t i = 0; i < len; i++) { |
| + const bool is_last_check = (i == (len - 1)); |
| + Label next_test; |
| + assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid)); |
| + if (is_last_check) { |
| + assembler()->j(NOT_EQUAL, deopt); |
| + } else { |
| + assembler()->j(NOT_EQUAL, &next_test); |
| + } |
| + GenerateStaticCall(deopt_id, |
| + token_index, |
| + *sorted[i].target, |
| + arg_count, |
| + arg_names, |
| + locs); |
| + if (!is_last_check) { |
| + assembler()->jmp(&match_found); |
| + } |
| + assembler()->Bind(&next_test); |
| + } |
| + assembler()->Bind(&match_found); |
| +} |
| + |
| + |
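EmitTestAndCall lowers a polymorphic call into a linear chain of class-id tests in the frequency order produced above: each failed check falls through to the next test, a matching check makes a static call to the recorded target and jumps past the chain, and a miss on the final check jumps to the deoptimization label. A rough C++ model of that control flow (DispatchOrDeopt, SortedCheck, and Deoptimize are illustrative names, not VM APIs):

    #include <cstdint>
    #include <cstdlib>

    using Target = void (*)();

    // Illustrative mirror of the sorted (cid, target) list tested at the call site.
    struct SortedCheck {
      intptr_t cid;
      Target target;
    };

    static void Deoptimize() { std::abort(); }  // stands in for the deopt stub

    // Control flow equivalent to the cmpl/j/call chain EmitTestAndCall emits.
    static void DispatchOrDeopt(intptr_t class_id_reg,
                                const SortedCheck* checks,
                                intptr_t len) {
      for (intptr_t i = 0; i < len; i++) {
        const bool is_last_check = (i == (len - 1));
        if (class_id_reg == checks[i].cid) {
          checks[i].target();  // GenerateStaticCall(...) in the compiler
          return;              // jmp(&match_found)
        }
        if (is_last_check) {
          Deoptimize();        // j(NOT_EQUAL, deopt) on the final check
        }
      }
    }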
| +void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition, |
| + FpuRegister left, |
| + FpuRegister right, |
| + BranchInstr* branch) { |
| + ASSERT(branch != NULL); |
| + assembler()->comisd(left, right); |
| + BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ? |
| + branch->true_successor() : branch->false_successor(); |
| + assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result)); |
| + branch->EmitBranchOnCondition(this, true_condition); |
| +} |
| + |
| + |
| + |
| +void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition, |
| + FpuRegister left, |
| + FpuRegister right, |
| + Register result) { |
| + assembler()->comisd(left, right); |
| + Label is_false, is_true, done; |
| + assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false. |
| + assembler()->j(true_condition, &is_true, Assembler::kNearJump); |
| + assembler()->Bind(&is_false); |
| + assembler()->LoadObject(result, Bool::False()); |
| + assembler()->jmp(&done); |
| + assembler()->Bind(&is_true); |
| + assembler()->LoadObject(result, Bool::True()); |
| + assembler()->Bind(&done); |
| +} |
| + |
| + |
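Both emitters start with comisd, which sets the parity flag when either operand is NaN (an unordered comparison). EmitDoubleCompareBranch therefore routes the unordered case to the false successor, except for NOT_EQUAL, where NaN != x holds and the unordered case goes to the true successor; EmitDoubleCompareBool simply materializes Bool::False() on NaN. The ordinary semantics, modeled in plain C++ for the LESS case (illustration only):

    #include <cmath>

    // What the emitted code computes for true_condition == LESS: comisd raises
    // the parity flag on NaN operands, which both emitters treat as "false".
    static bool DoubleLess(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) {
        return false;       // PARITY_EVEN path: unordered comparison
      }
      return left < right;  // j(true_condition, &is_true) path
    }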
| +Condition FlowGraphCompiler::FlipCondition(Condition condition) { |
| + switch (condition) { |
| + case EQUAL: return EQUAL; |
| + case NOT_EQUAL: return NOT_EQUAL; |
| + case LESS: return GREATER; |
| + case LESS_EQUAL: return GREATER_EQUAL; |
| + case GREATER: return LESS; |
| + case GREATER_EQUAL: return LESS_EQUAL; |
| + case BELOW: return ABOVE; |
| + case BELOW_EQUAL: return ABOVE_EQUAL; |
| + case ABOVE: return BELOW; |
| + case ABOVE_EQUAL: return BELOW_EQUAL; |
| + default: |
| + UNIMPLEMENTED(); |
| + return EQUAL; |
| + } |
| +} |
| + |
| + |
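FlipCondition returns the condition that holds after the two operands of a comparison are swapped (so left < right can be tested as right > left); the symmetric conditions EQUAL and NOT_EQUAL map to themselves. A small self-check of that operand-swap identity (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t left = 3;
      const intptr_t right = 7;
      // (left OP right) must agree with (right FlipCondition(OP) left).
      assert((left < right) == (right > left));    // LESS       <-> GREATER
      assert((left <= right) == (right >= left));  // LESS_EQUAL <-> GREATER_EQUAL
      assert((left == right) == (right == left));  // EQUAL      <-> EQUAL
      return 0;
    }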
| +bool FlowGraphCompiler::EvaluateCondition(Condition condition, |
| + intptr_t left, |
| + intptr_t right) { |
| + const uintptr_t unsigned_left = static_cast<uintptr_t>(left); |
| + const uintptr_t unsigned_right = static_cast<uintptr_t>(right); |
| + switch (condition) { |
| + case EQUAL: return left == right; |
| + case NOT_EQUAL: return left != right; |
| + case LESS: return left < right; |
| + case LESS_EQUAL: return left <= right; |
| + case GREATER: return left > right; |
| + case GREATER_EQUAL: return left >= right; |
| + case BELOW: return unsigned_left < unsigned_right; |
| + case BELOW_EQUAL: return unsigned_left <= unsigned_right; |
| + case ABOVE: return unsigned_left > unsigned_right; |
| + case ABOVE_EQUAL: return unsigned_left >= unsigned_right; |
| + default: |
| + UNIMPLEMENTED(); |
| + return false; |
| + } |
| +} |
| + |
| + |
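EvaluateCondition folds a comparison between two compile-time constants, matching the x64 flag semantics the generated code would otherwise test at run time: LESS through GREATER_EQUAL are signed, while BELOW through ABOVE_EQUAL reinterpret both operands as unsigned words. The difference matters for negative values (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t left = -1;
      const intptr_t right = 1;
      assert(left < right);                   // LESS:  signed comparison, true
      assert(static_cast<uintptr_t>(left) >
             static_cast<uintptr_t>(right));  // ABOVE: unsigned view, also true
      return 0;
    }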
| +FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid, |
| + Register array, |
| + intptr_t index) { |
| + const int64_t disp = |
| + static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid); |
| + ASSERT(Utils::IsInt(32, disp)); |
| + return FieldAddress(array, static_cast<int32_t>(disp)); |
| +} |
| + |
| + |
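For a constant index the element address is a plain displacement off the array pointer: disp = index * ElementSizeFor(cid) + DataOffsetFor(cid), asserted to fit the 32-bit displacement field, with FieldAddress folding the heap-object tag adjustment into the final displacement. A worked example of the arithmetic with assumed layout values (the element size and data offset below are illustrative, not the VM's actual constants):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t index = 5;
      const int64_t element_size = 8;  // e.g. an 8-byte double element
      const int64_t data_offset = 16;  // assumed header size for the example
      const int64_t disp = index * element_size + data_offset;
      assert(disp == 56);              // well within a signed 32-bit displacement
      return 0;
    }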
| +FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid, |
| + Register array, |
| + Register index) { |
| + // Note that index is smi-tagged (i.e., times 2) for all arrays with element |

| + // size > 1. For Uint8Array and OneByteString the index is expected to be |
| + // untagged before accessing. |
| + ASSERT(kSmiTagShift == 1); |
| + switch (cid) { |
| + case kArrayCid: |
| + case kImmutableArrayCid: |
| + return FieldAddress( |
| + array, index, TIMES_HALF_WORD_SIZE, Array::data_offset()); |
| + case kFloat32ArrayCid: |
| + return FieldAddress(array, index, TIMES_2, Float32Array::data_offset()); |
| + case kFloat64ArrayCid: |
| + return FieldAddress(array, index, TIMES_4, Float64Array::data_offset()); |
| + case kUint8ArrayCid: |
| + return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset()); |
| + case kUint8ClampedArrayCid: |
| + return |
| + FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset()); |
| + case kOneByteStringCid: |
| + return FieldAddress(array, index, TIMES_1, OneByteString::data_offset()); |
| + case kTwoByteStringCid: |
| + return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset()); |
| + default: |
| + UNIMPLEMENTED(); |
| + return FieldAddress(SPREG, 0); |
| + } |
| +} |
| + |
| + |
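The scale factors follow from the smi tag on the index register: a smi-tagged index is the logical index shifted left by kSmiTagShift (1), so scaling the tagged value by element_size / 2 reaches the same byte offset as scaling the untagged index by the full element size. That is why 8-byte elements (Array via TIMES_HALF_WORD_SIZE on x64, Float64Array via TIMES_4) use a scale of 4, Float32Array uses TIMES_2, and TwoByteString uses TIMES_1 with the tag still in place, while the byte-sized Uint8Array and OneByteString cases expect a caller-untagged index. The identity, spelled out (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kSmiTagShiftLocal = 1;  // mirrors the VM's kSmiTagShift
      const intptr_t logical_index = 5;
      const intptr_t tagged_index = logical_index << kSmiTagShiftLocal;
      const intptr_t element_size = 8;       // Float64Array -> TIMES_4
      // Scaling the tagged index by half the element size hits the same offset.
      assert(tagged_index * (element_size / 2) == logical_index * element_size);
      return 0;
    }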
| #undef __ |
| #define __ compiler_->assembler()-> |
| @@ -1394,18 +1587,18 @@ |
| MoveMemoryToMemory(destination.ToStackSlotAddress(), |
| source.ToStackSlotAddress()); |
| } |
| - } else if (source.IsXmmRegister()) { |
| - if (destination.IsXmmRegister()) { |
| + } else if (source.IsFpuRegister()) { |
| + if (destination.IsFpuRegister()) { |
| // Optimization manual recommends using MOVAPS for register |
| // to register moves. |
| - __ movaps(destination.xmm_reg(), source.xmm_reg()); |
| + __ movaps(destination.fpu_reg(), source.fpu_reg()); |
| } else { |
| ASSERT(destination.IsDoubleStackSlot()); |
| - __ movsd(destination.ToStackSlotAddress(), source.xmm_reg()); |
| + __ movsd(destination.ToStackSlotAddress(), source.fpu_reg()); |
| } |
| } else if (source.IsDoubleStackSlot()) { |
| - if (destination.IsXmmRegister()) { |
| - __ movsd(destination.xmm_reg(), source.ToStackSlotAddress()); |
| + if (destination.IsFpuRegister()) { |
| + __ movsd(destination.fpu_reg(), source.ToStackSlotAddress()); |
| } else { |
| ASSERT(destination.IsDoubleStackSlot()); |
| __ movsd(XMM0, source.ToStackSlotAddress()); |
| @@ -1443,15 +1636,15 @@ |
| Exchange(destination.reg(), source.ToStackSlotAddress()); |
| } else if (source.IsStackSlot() && destination.IsStackSlot()) { |
| Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress()); |
| - } else if (source.IsXmmRegister() && destination.IsXmmRegister()) { |
| - __ movaps(XMM0, source.xmm_reg()); |
| - __ movaps(source.xmm_reg(), destination.xmm_reg()); |
| - __ movaps(destination.xmm_reg(), XMM0); |
| - } else if (source.IsXmmRegister() || destination.IsXmmRegister()) { |
| + } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { |
| + __ movaps(XMM0, source.fpu_reg()); |
| + __ movaps(source.fpu_reg(), destination.fpu_reg()); |
| + __ movaps(destination.fpu_reg(), XMM0); |
| + } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { |
| ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot()); |
| - XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg() |
| - : destination.xmm_reg(); |
| - Address slot_address = source.IsXmmRegister() |
| + XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg() |
| + : destination.fpu_reg(); |
| + Address slot_address = source.IsFpuRegister() |
| ? destination.ToStackSlotAddress() |
| : source.ToStackSlotAddress(); |