Chromium Code Reviews

Side by Side Diff: runtime/vm/intermediate_language_x64.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
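
Note on the diff below: within this file the patch is largely a mechanical rename of the XMM-specific location plumbing to architecture-neutral FPU names -- Location::RequiresXmmRegister() becomes Location::RequiresFpuRegister(), Location::xmm_reg() becomes Location::fpu_reg(), and LoadDoubleOrSmiToXmm becomes LoadDoubleOrSmiToFpu -- plus a new include of "vm/dart_entry.h" and the addition of several instruction backends (ThrowInstr, ReThrowInstr, GotoInstr, StrictCompareInstr, ClosureCallInstr and others) at the end of the file. The sketch that follows is a simplified, hypothetical model, not the Dart VM's actual declarations, of why such a rename helps an ARM port: the location API names a generic FPU register, and each architecture can map FpuRegister onto its own register set.

// Hypothetical, simplified model -- not the Dart VM's actual classes -- of the
// FPU-register abstraction this rename points toward. TARGET_ARCH_X64 is the
// macro used by the real file; everything else is an illustrative stand-in.
#include <cstdio>

#if defined(TARGET_ARCH_X64)
enum XmmRegister { XMM0, XMM1, XMM2, XMM3 };
typedef XmmRegister FpuRegister;  // On x64 the FPU registers are the XMM registers.
#else
enum DRegister { D0, D1, D2, D3 };
typedef DRegister FpuRegister;    // On ARM they would be VFP D-registers.
#endif

class Location {
 public:
  // Architecture-neutral request: "any FPU register", regardless of what the
  // target calls its floating-point registers.
  static Location RequiresFpuRegister() { return Location(kRequiresFpuRegister); }

  // Accessor the per-architecture backends use once a concrete register has
  // been assigned; this toy version just hands back the first register.
  FpuRegister fpu_reg() const { return static_cast<FpuRegister>(0); }

 private:
  enum Kind { kRequiresFpuRegister };
  explicit Location(Kind kind) : kind_(kind) {}
  Kind kind_;
};

int main() {
  Location in0 = Location::RequiresFpuRegister();
  FpuRegister left = in0.fpu_reg();  // XMM0 on x64; D0 in the ARM variant.
  std::printf("assigned fpu register index: %d\n", static_cast<int>(left));
  return 0;
}
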
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6 #if defined(TARGET_ARCH_X64) 6 #if defined(TARGET_ARCH_X64)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "lib/error.h" 10 #include "lib/error.h"
11 #include "vm/dart_entry.h"
11 #include "vm/flow_graph_compiler.h" 12 #include "vm/flow_graph_compiler.h"
12 #include "vm/locations.h" 13 #include "vm/locations.h"
13 #include "vm/object_store.h" 14 #include "vm/object_store.h"
14 #include "vm/parser.h" 15 #include "vm/parser.h"
15 #include "vm/stub_code.h" 16 #include "vm/stub_code.h"
16 #include "vm/symbols.h" 17 #include "vm/symbols.h"
17 18
18 #define __ compiler->assembler()-> 19 #define __ compiler->assembler()->
19 20
20 namespace dart { 21 namespace dart {
(...skipping 246 matching lines...)
267 268
268 269
269 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const { 270 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const {
270 const intptr_t kNumInputs = 2; 271 const intptr_t kNumInputs = 2;
271 const bool is_checked_strict_equal = 272 const bool is_checked_strict_equal =
272 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid); 273 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid);
273 if (receiver_class_id() == kDoubleCid) { 274 if (receiver_class_id() == kDoubleCid) {
274 const intptr_t kNumTemps = 0; 275 const intptr_t kNumTemps = 0;
275 LocationSummary* locs = 276 LocationSummary* locs =
276 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 277 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
277 locs->set_in(0, Location::RequiresXmmRegister()); 278 locs->set_in(0, Location::RequiresFpuRegister());
278 locs->set_in(1, Location::RequiresXmmRegister()); 279 locs->set_in(1, Location::RequiresFpuRegister());
279 locs->set_out(Location::RequiresRegister()); 280 locs->set_out(Location::RequiresRegister());
280 return locs; 281 return locs;
281 } 282 }
282 if (receiver_class_id() == kSmiCid) { 283 if (receiver_class_id() == kSmiCid) {
283 const intptr_t kNumTemps = 0; 284 const intptr_t kNumTemps = 0;
284 LocationSummary* locs = 285 LocationSummary* locs =
285 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 286 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
286 locs->set_in(0, Location::RegisterOrConstant(left())); 287 locs->set_in(0, Location::RegisterOrConstant(left()));
287 // Only one input can be a constant operand. The case of two constant 288 // Only one input can be a constant operand. The case of two constant
288 // operands should be handled by constant propagation. 289 // operands should be handled by constant propagation.
(...skipping 365 matching lines...)
654 UNREACHABLE(); 655 UNREACHABLE();
655 return OVERFLOW; 656 return OVERFLOW;
656 } 657 }
657 } 658 }
658 659
659 660
660 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler, 661 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
661 const LocationSummary& locs, 662 const LocationSummary& locs,
662 Token::Kind kind, 663 Token::Kind kind,
663 BranchInstr* branch) { 664 BranchInstr* branch) {
664 XmmRegister left = locs.in(0).xmm_reg(); 665 XmmRegister left = locs.in(0).fpu_reg();
665 XmmRegister right = locs.in(1).xmm_reg(); 666 XmmRegister right = locs.in(1).fpu_reg();
666 667
667 Condition true_condition = TokenKindToDoubleCondition(kind); 668 Condition true_condition = TokenKindToDoubleCondition(kind);
668 if (branch != NULL) { 669 if (branch != NULL) {
669 compiler->EmitDoubleCompareBranch( 670 compiler->EmitDoubleCompareBranch(
670 true_condition, left, right, branch); 671 true_condition, left, right, branch);
671 } else { 672 } else {
672 compiler->EmitDoubleCompareBool( 673 compiler->EmitDoubleCompareBool(
673 true_condition, left, right, locs.out().reg()); 674 true_condition, left, right, locs.out().reg());
674 } 675 }
675 } 676 }
(...skipping 81 matching lines...)
757 branch->EmitBranchOnCondition(compiler, branch_condition); 758 branch->EmitBranchOnCondition(compiler, branch_condition);
758 } 759 }
759 760
760 761
761 LocationSummary* RelationalOpInstr::MakeLocationSummary() const { 762 LocationSummary* RelationalOpInstr::MakeLocationSummary() const {
762 const intptr_t kNumInputs = 2; 763 const intptr_t kNumInputs = 2;
763 const intptr_t kNumTemps = 0; 764 const intptr_t kNumTemps = 0;
764 if (operands_class_id() == kDoubleCid) { 765 if (operands_class_id() == kDoubleCid) {
765 LocationSummary* summary = 766 LocationSummary* summary =
766 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 767 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
767 summary->set_in(0, Location::RequiresXmmRegister()); 768 summary->set_in(0, Location::RequiresFpuRegister());
768 summary->set_in(1, Location::RequiresXmmRegister()); 769 summary->set_in(1, Location::RequiresFpuRegister());
769 summary->set_out(Location::RequiresRegister()); 770 summary->set_out(Location::RequiresRegister());
770 return summary; 771 return summary;
771 } else if (operands_class_id() == kSmiCid) { 772 } else if (operands_class_id() == kSmiCid) {
772 LocationSummary* summary = 773 LocationSummary* summary =
773 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 774 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
774 summary->set_in(0, Location::RegisterOrConstant(left())); 775 summary->set_in(0, Location::RegisterOrConstant(left()));
775 // Only one input can be a constant operand. The case of two constant 776 // Only one input can be a constant operand. The case of two constant
776 // operands should be handled by constant propagation. 777 // operands should be handled by constant propagation.
777 summary->set_in(1, summary->in(0).IsConstant() 778 summary->set_in(1, summary->in(0).IsConstant()
778 ? Location::RequiresRegister() 779 ? Location::RequiresRegister()
(...skipping 223 matching lines...)
1002 LocationSummary* LoadIndexedInstr::MakeLocationSummary() const { 1003 LocationSummary* LoadIndexedInstr::MakeLocationSummary() const {
1003 const intptr_t kNumInputs = 2; 1004 const intptr_t kNumInputs = 2;
1004 const intptr_t kNumTemps = 0; 1005 const intptr_t kNumTemps = 0;
1005 LocationSummary* locs = 1006 LocationSummary* locs =
1006 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 1007 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
1007 locs->set_in(0, Location::RequiresRegister()); 1008 locs->set_in(0, Location::RequiresRegister());
1008 locs->set_in(1, CanBeImmediateIndex(index(), class_id()) 1009 locs->set_in(1, CanBeImmediateIndex(index(), class_id())
1009 ? Location::RegisterOrSmiConstant(index()) 1010 ? Location::RegisterOrSmiConstant(index())
1010 : Location::RequiresRegister()); 1011 : Location::RequiresRegister());
1011 if (representation() == kUnboxedDouble) { 1012 if (representation() == kUnboxedDouble) {
1012 locs->set_out(Location::RequiresXmmRegister()); 1013 locs->set_out(Location::RequiresFpuRegister());
1013 } else { 1014 } else {
1014 locs->set_out(Location::RequiresRegister()); 1015 locs->set_out(Location::RequiresRegister());
1015 } 1016 }
1016 return locs; 1017 return locs;
1017 } 1018 }
1018 1019
1019 1020
1020 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1021 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1021 Register array = locs()->in(0).reg(); 1022 Register array = locs()->in(0).reg();
1022 Location index = locs()->in(1); 1023 Location index = locs()->in(1);
(...skipping 18 matching lines...)
1041 return; 1042 return;
1042 } 1043 }
1043 1044
1044 FieldAddress element_address = index.IsRegister() ? 1045 FieldAddress element_address = index.IsRegister() ?
1045 FlowGraphCompiler::ElementAddressForRegIndex( 1046 FlowGraphCompiler::ElementAddressForRegIndex(
1046 class_id(), array, index.reg()) : 1047 class_id(), array, index.reg()) :
1047 FlowGraphCompiler::ElementAddressForIntIndex( 1048 FlowGraphCompiler::ElementAddressForIntIndex(
1048 class_id(), array, Smi::Cast(index.constant()).Value()); 1049 class_id(), array, Smi::Cast(index.constant()).Value());
1049 1050
1050 if (representation() == kUnboxedDouble) { 1051 if (representation() == kUnboxedDouble) {
1051 XmmRegister result = locs()->out().xmm_reg(); 1052 XmmRegister result = locs()->out().fpu_reg();
1052 if (class_id() == kFloat32ArrayCid) { 1053 if (class_id() == kFloat32ArrayCid) {
1053 // Load single precision float. 1054 // Load single precision float.
1054 __ movss(result, element_address); 1055 __ movss(result, element_address);
1055 // Promote to double. 1056 // Promote to double.
1056 __ cvtss2sd(result, locs()->out().xmm_reg()); 1057 __ cvtss2sd(result, locs()->out().fpu_reg());
1057 } else { 1058 } else {
1058 ASSERT(class_id() == kFloat64ArrayCid); 1059 ASSERT(class_id() == kFloat64ArrayCid);
1059 __ movsd(result, element_address); 1060 __ movsd(result, element_address);
1060 } 1061 }
1061 return; 1062 return;
1062 } 1063 }
1063 1064
1064 Register result = locs()->out().reg(); 1065 Register result = locs()->out().reg();
1065 if ((class_id() == kUint8ArrayCid) || 1066 if ((class_id() == kUint8ArrayCid) ||
1066 (class_id() == kUint8ClampedArrayCid)) { 1067 (class_id() == kUint8ClampedArrayCid)) {
(...skipping 28 matching lines...)
1095 ? Location::WritableRegister() 1096 ? Location::WritableRegister()
1096 : Location::RegisterOrConstant(value())); 1097 : Location::RegisterOrConstant(value()));
1097 break; 1098 break;
1098 case kUint8ArrayCid: 1099 case kUint8ArrayCid:
1099 // TODO(fschneider): Add location constraint for byte registers (RAX, 1100 // TODO(fschneider): Add location constraint for byte registers (RAX,
1100 // RBX, RCX, RDX) instead of using a fixed register. 1101 // RBX, RCX, RDX) instead of using a fixed register.
1101 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), RAX)); 1102 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), RAX));
1102 break; 1103 break;
1103 case kFloat32ArrayCid: 1104 case kFloat32ArrayCid:
1104 // Need temp register for float-to-double conversion. 1105 // Need temp register for float-to-double conversion.
1105 locs->AddTemp(Location::RequiresXmmRegister()); 1106 locs->AddTemp(Location::RequiresFpuRegister());
1106 // Fall through. 1107 // Fall through.
1107 case kFloat64ArrayCid: 1108 case kFloat64ArrayCid:
1108 // TODO(srdjan): Support Float64 constants. 1109 // TODO(srdjan): Support Float64 constants.
1109 locs->set_in(2, Location::RequiresXmmRegister()); 1110 locs->set_in(2, Location::RequiresFpuRegister());
1110 break; 1111 break;
1111 default: 1112 default:
1112 UNREACHABLE(); 1113 UNREACHABLE();
1113 return NULL; 1114 return NULL;
1114 } 1115 }
1115 return locs; 1116 return locs;
1116 } 1117 }
1117 1118
1118 1119
1119 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1120 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 31 matching lines...)
1151 ASSERT(locs()->in(2).reg() == RAX); 1152 ASSERT(locs()->in(2).reg() == RAX);
1152 __ SmiUntag(RAX); 1153 __ SmiUntag(RAX);
1153 __ movb(element_address, RAX); 1154 __ movb(element_address, RAX);
1154 } 1155 }
1155 if (index.IsRegister()) { 1156 if (index.IsRegister()) {
1156 __ SmiTag(index.reg()); // Re-tag. 1157 __ SmiTag(index.reg()); // Re-tag.
1157 } 1158 }
1158 break; 1159 break;
1159 case kFloat32ArrayCid: 1160 case kFloat32ArrayCid:
1160 // Convert to single precision. 1161 // Convert to single precision.
1161 __ cvtsd2ss(locs()->temp(0).xmm_reg(), locs()->in(2).xmm_reg()); 1162 __ cvtsd2ss(locs()->temp(0).fpu_reg(), locs()->in(2).fpu_reg());
1162 // Store. 1163 // Store.
1163 __ movss(element_address, locs()->temp(0).xmm_reg()); 1164 __ movss(element_address, locs()->temp(0).fpu_reg());
1164 break; 1165 break;
1165 case kFloat64ArrayCid: 1166 case kFloat64ArrayCid:
1166 __ movsd(element_address, locs()->in(2).xmm_reg()); 1167 __ movsd(element_address, locs()->in(2).fpu_reg());
1167 break; 1168 break;
1168 default: 1169 default:
1169 UNREACHABLE(); 1170 UNREACHABLE();
1170 } 1171 }
1171 } 1172 }
1172 1173
1173 1174
1174 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const { 1175 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const {
1175 const intptr_t kNumInputs = 2; 1176 const intptr_t kNumInputs = 2;
1176 const intptr_t num_temps = 0; 1177 const intptr_t num_temps = 0;
(...skipping 785 matching lines...)
1962 } 1963 }
1963 1964
1964 1965
1965 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const { 1966 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const {
1966 const intptr_t kNumInputs = 1; 1967 const intptr_t kNumInputs = 1;
1967 const intptr_t kNumTemps = 0; 1968 const intptr_t kNumTemps = 0;
1968 LocationSummary* summary = 1969 LocationSummary* summary =
1969 new LocationSummary(kNumInputs, 1970 new LocationSummary(kNumInputs,
1970 kNumTemps, 1971 kNumTemps,
1971 LocationSummary::kCallOnSlowPath); 1972 LocationSummary::kCallOnSlowPath);
1972 summary->set_in(0, Location::RequiresXmmRegister()); 1973 summary->set_in(0, Location::RequiresFpuRegister());
1973 summary->set_out(Location::RequiresRegister()); 1974 summary->set_out(Location::RequiresRegister());
1974 return summary; 1975 return summary;
1975 } 1976 }
1976 1977
1977 1978
1978 class BoxDoubleSlowPath : public SlowPathCode { 1979 class BoxDoubleSlowPath : public SlowPathCode {
1979 public: 1980 public:
1980 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction) 1981 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction)
1981 : instruction_(instruction) { } 1982 : instruction_(instruction) { }
1982 1983
(...skipping 22 matching lines...)
2005 private: 2006 private:
2006 BoxDoubleInstr* instruction_; 2007 BoxDoubleInstr* instruction_;
2007 }; 2008 };
2008 2009
2009 2010
2010 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2011 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2011 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 2012 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2012 compiler->AddSlowPathCode(slow_path); 2013 compiler->AddSlowPathCode(slow_path);
2013 2014
2014 Register out_reg = locs()->out().reg(); 2015 Register out_reg = locs()->out().reg();
2015 XmmRegister value = locs()->in(0).xmm_reg(); 2016 XmmRegister value = locs()->in(0).fpu_reg();
2016 2017
2017 AssemblerMacros::TryAllocate(compiler->assembler(), 2018 AssemblerMacros::TryAllocate(compiler->assembler(),
2018 compiler->double_class(), 2019 compiler->double_class(),
2019 slow_path->entry_label(), 2020 slow_path->entry_label(),
2020 Assembler::kFarJump, 2021 Assembler::kFarJump,
2021 out_reg); 2022 out_reg);
2022 __ Bind(slow_path->exit_label()); 2023 __ Bind(slow_path->exit_label());
2023 __ movsd(FieldAddress(out_reg, Double::value_offset()), value); 2024 __ movsd(FieldAddress(out_reg, Double::value_offset()), value);
2024 } 2025 }
2025 2026
2026 2027
2027 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const { 2028 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const {
2028 const intptr_t kNumInputs = 1; 2029 const intptr_t kNumInputs = 1;
2029 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0; 2030 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
2030 LocationSummary* summary = 2031 LocationSummary* summary =
2031 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2032 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2032 summary->set_in(0, Location::RequiresRegister()); 2033 summary->set_in(0, Location::RequiresRegister());
2033 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister()); 2034 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister());
2034 summary->set_out(Location::RequiresXmmRegister()); 2035 summary->set_out(Location::RequiresFpuRegister());
2035 return summary; 2036 return summary;
2036 } 2037 }
2037 2038
2038 2039
2039 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2040 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2040 const intptr_t value_cid = value()->ResultCid(); 2041 const intptr_t value_cid = value()->ResultCid();
2041 const Register value = locs()->in(0).reg(); 2042 const Register value = locs()->in(0).reg();
2042 const XmmRegister result = locs()->out().xmm_reg(); 2043 const XmmRegister result = locs()->out().fpu_reg();
2043 2044
2044 if (value_cid == kDoubleCid) { 2045 if (value_cid == kDoubleCid) {
2045 __ movsd(result, FieldAddress(value, Double::value_offset())); 2046 __ movsd(result, FieldAddress(value, Double::value_offset()));
2046 } else if (value_cid == kSmiCid) { 2047 } else if (value_cid == kSmiCid) {
2047 __ SmiUntag(value); // Untag input before conversion. 2048 __ SmiUntag(value); // Untag input before conversion.
2048 __ cvtsi2sd(result, value); 2049 __ cvtsi2sd(result, value);
2049 __ SmiTag(value); // Restore input register. 2050 __ SmiTag(value); // Restore input register.
2050 } else { 2051 } else {
2051 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp); 2052 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp);
2052 compiler->LoadDoubleOrSmiToXmm(result, 2053 compiler->LoadDoubleOrSmiToFpu(result,
2053 value, 2054 value,
2054 locs()->temp(0).reg(), 2055 locs()->temp(0).reg(),
2055 deopt); 2056 deopt);
2056 } 2057 }
2057 } 2058 }
2058 2059
2059 2060
2060 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const { 2061 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const {
2061 const intptr_t kNumInputs = 2; 2062 const intptr_t kNumInputs = 2;
2062 const intptr_t kNumTemps = 0; 2063 const intptr_t kNumTemps = 0;
2063 LocationSummary* summary = 2064 LocationSummary* summary =
2064 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2065 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2065 summary->set_in(0, Location::RequiresXmmRegister()); 2066 summary->set_in(0, Location::RequiresFpuRegister());
2066 summary->set_in(1, Location::RequiresXmmRegister()); 2067 summary->set_in(1, Location::RequiresFpuRegister());
2067 summary->set_out(Location::SameAsFirstInput()); 2068 summary->set_out(Location::SameAsFirstInput());
2068 return summary; 2069 return summary;
2069 } 2070 }
2070 2071
2071 2072
2072 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2073 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2073 XmmRegister left = locs()->in(0).xmm_reg(); 2074 XmmRegister left = locs()->in(0).fpu_reg();
2074 XmmRegister right = locs()->in(1).xmm_reg(); 2075 XmmRegister right = locs()->in(1).fpu_reg();
2075 2076
2076 ASSERT(locs()->out().xmm_reg() == left); 2077 ASSERT(locs()->out().fpu_reg() == left);
2077 2078
2078 switch (op_kind()) { 2079 switch (op_kind()) {
2079 case Token::kADD: __ addsd(left, right); break; 2080 case Token::kADD: __ addsd(left, right); break;
2080 case Token::kSUB: __ subsd(left, right); break; 2081 case Token::kSUB: __ subsd(left, right); break;
2081 case Token::kMUL: __ mulsd(left, right); break; 2082 case Token::kMUL: __ mulsd(left, right); break;
2082 case Token::kDIV: __ divsd(left, right); break; 2083 case Token::kDIV: __ divsd(left, right); break;
2083 default: UNREACHABLE(); 2084 default: UNREACHABLE();
2084 } 2085 }
2085 } 2086 }
2086 2087
2087 2088
2088 LocationSummary* MathSqrtInstr::MakeLocationSummary() const { 2089 LocationSummary* MathSqrtInstr::MakeLocationSummary() const {
2089 const intptr_t kNumInputs = 1; 2090 const intptr_t kNumInputs = 1;
2090 const intptr_t kNumTemps = 0; 2091 const intptr_t kNumTemps = 0;
2091 LocationSummary* summary = 2092 LocationSummary* summary =
2092 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2093 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2093 summary->set_in(0, Location::RequiresXmmRegister()); 2094 summary->set_in(0, Location::RequiresFpuRegister());
2094 summary->set_out(Location::RequiresXmmRegister()); 2095 summary->set_out(Location::RequiresFpuRegister());
2095 return summary; 2096 return summary;
2096 } 2097 }
2097 2098
2098 2099
2099 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2100 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2100 __ sqrtsd(locs()->out().xmm_reg(), locs()->in(0).xmm_reg()); 2101 __ sqrtsd(locs()->out().fpu_reg(), locs()->in(0).fpu_reg());
2101 } 2102 }
2102 2103
2103 2104
2104 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const { 2105 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const {
2105 const intptr_t kNumInputs = 1; 2106 const intptr_t kNumInputs = 1;
2106 const intptr_t kNumTemps = 0; 2107 const intptr_t kNumTemps = 0;
2107 LocationSummary* summary = 2108 LocationSummary* summary =
2108 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2109 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2109 summary->set_in(0, Location::RequiresRegister()); 2110 summary->set_in(0, Location::RequiresRegister());
2110 summary->set_out(Location::SameAsFirstInput()); 2111 summary->set_out(Location::SameAsFirstInput());
(...skipping 104 matching lines...)
2215 locs()); 2216 locs());
2216 __ Bind(&done); 2217 __ Bind(&done);
2217 } 2218 }
2218 2219
2219 2220
2220 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const { 2221 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const {
2221 const intptr_t kNumInputs = 1; 2222 const intptr_t kNumInputs = 1;
2222 const intptr_t kNumTemps = 1; 2223 const intptr_t kNumTemps = 1;
2223 LocationSummary* result = new LocationSummary( 2224 LocationSummary* result = new LocationSummary(
2224 kNumInputs, kNumTemps, LocationSummary::kNoCall); 2225 kNumInputs, kNumTemps, LocationSummary::kNoCall);
2225 result->set_in(0, Location::RequiresXmmRegister()); 2226 result->set_in(0, Location::RequiresFpuRegister());
2226 result->set_out(Location:: Location::RequiresRegister()); 2227 result->set_out(Location:: Location::RequiresRegister());
2227 result->set_temp(0, Location::RequiresRegister()); 2228 result->set_temp(0, Location::RequiresRegister());
2228 return result; 2229 return result;
2229 } 2230 }
2230 2231
2231 2232
2232 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2233 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2233 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi); 2234 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi);
2234 Register result = locs()->out().reg(); 2235 Register result = locs()->out().reg();
2235 XmmRegister value = locs()->in(0).xmm_reg(); 2236 XmmRegister value = locs()->in(0).fpu_reg();
2236 Register temp = locs()->temp(0).reg(); 2237 Register temp = locs()->temp(0).reg();
2237 2238
2238 __ cvttsd2siq(result, value); 2239 __ cvttsd2siq(result, value);
2239 // Overflow is signalled with minint. 2240 // Overflow is signalled with minint.
2240 Label do_call, done; 2241 Label do_call, done;
2241 // Check for overflow and that it fits into Smi. 2242 // Check for overflow and that it fits into Smi.
2242 __ movq(temp, result); 2243 __ movq(temp, result);
2243 __ shlq(temp, Immediate(1)); 2244 __ shlq(temp, Immediate(1));
2244 __ j(OVERFLOW, deopt); 2245 __ j(OVERFLOW, deopt);
2245 __ SmiTag(result); 2246 __ SmiTag(result);
2246 } 2247 }
2247 2248
2248 2249
2249 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const { 2250 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const {
2250 const intptr_t kNumInputs = 1; 2251 const intptr_t kNumInputs = 1;
2251 const intptr_t kNumTemps = 2252 const intptr_t kNumTemps =
2252 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0; 2253 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0;
2253 LocationSummary* result = 2254 LocationSummary* result =
2254 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2255 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2255 result->set_in(0, Location::RequiresXmmRegister()); 2256 result->set_in(0, Location::RequiresFpuRegister());
2256 result->set_out(Location::RequiresXmmRegister()); 2257 result->set_out(Location::RequiresFpuRegister());
2257 if (recognized_kind() == MethodRecognizer::kDoubleRound) { 2258 if (recognized_kind() == MethodRecognizer::kDoubleRound) {
2258 result->set_temp(0, Location::RequiresXmmRegister()); 2259 result->set_temp(0, Location::RequiresFpuRegister());
2259 } 2260 }
2260 return result; 2261 return result;
2261 } 2262 }
2262 2263
2263 2264
2264 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2265 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2265 XmmRegister value = locs()->in(0).xmm_reg(); 2266 XmmRegister value = locs()->in(0).fpu_reg();
2266 XmmRegister result = locs()->out().xmm_reg(); 2267 XmmRegister result = locs()->out().fpu_reg();
2267 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) { 2268 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) {
2268 __ roundsd(result, value, Assembler::kRoundToZero); 2269 __ roundsd(result, value, Assembler::kRoundToZero);
2269 } else { 2270 } else {
2270 XmmRegister temp = locs()->temp(0).xmm_reg(); 2271 XmmRegister temp = locs()->temp(0).fpu_reg();
2271 __ DoubleRound(result, value, temp); 2272 __ DoubleRound(result, value, temp);
2272 } 2273 }
2273 } 2274 }
2274 2275
2275 2276
2276 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const { 2277 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const {
2277 return MakeCallSummary(); 2278 return MakeCallSummary();
2278 } 2279 }
2279 2280
2280 2281
(...skipping 218 matching lines...)
2499 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const { 2500 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const {
2500 UNIMPLEMENTED(); 2501 UNIMPLEMENTED();
2501 return NULL; 2502 return NULL;
2502 } 2503 }
2503 2504
2504 2505
2505 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2506 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2506 UNIMPLEMENTED(); 2507 UNIMPLEMENTED();
2507 } 2508 }
2508 2509
2510
2511 LocationSummary* ThrowInstr::MakeLocationSummary() const {
2512 return new LocationSummary(0, 0, LocationSummary::kCall);
2513 }
2514
2515
2516 void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2517 compiler->GenerateCallRuntime(token_pos(),
2518 kThrowRuntimeEntry,
2519 locs());
2520 __ int3();
2521 }
2522
2523
2524 LocationSummary* ReThrowInstr::MakeLocationSummary() const {
2525 return new LocationSummary(0, 0, LocationSummary::kCall);
2526 }
2527
2528
2529 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2530 compiler->GenerateCallRuntime(token_pos(),
2531 kReThrowRuntimeEntry,
2532 locs());
2533 __ int3();
2534 }
2535
2536
2537 LocationSummary* GotoInstr::MakeLocationSummary() const {
2538 return new LocationSummary(0, 0, LocationSummary::kNoCall);
2539 }
2540
2541
2542 void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2543 // Add deoptimization descriptor for deoptimizing instructions
2544 // that may be inserted before this instruction.
2545 if (!compiler->is_optimizing()) {
2546 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
2547 GetDeoptId(),
2548 0); // No token position.
2549 }
2550
2551 if (HasParallelMove()) {
2552 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
2553 }
2554
2555 // We can fall through if the successor is the next block in the list.
2556 // Otherwise, we need a jump.
2557 if (!compiler->IsNextBlock(successor())) {
2558 __ jmp(compiler->GetBlockLabel(successor()));
2559 }
2560 }
2561
2562
2563 static Condition NegateCondition(Condition condition) {
2564 switch (condition) {
2565 case EQUAL: return NOT_EQUAL;
2566 case NOT_EQUAL: return EQUAL;
2567 case LESS: return GREATER_EQUAL;
2568 case LESS_EQUAL: return GREATER;
2569 case GREATER: return LESS_EQUAL;
2570 case GREATER_EQUAL: return LESS;
2571 case BELOW: return ABOVE_EQUAL;
2572 case BELOW_EQUAL: return ABOVE;
2573 case ABOVE: return BELOW_EQUAL;
2574 case ABOVE_EQUAL: return BELOW;
2575 default:
2576 OS::Print("Error %d\n", condition);
2577 UNIMPLEMENTED();
2578 return EQUAL;
2579 }
2580 }
2581
2582
2583 void ControlInstruction::EmitBranchOnValue(FlowGraphCompiler* compiler,
2584 bool value) {
2585 if (value && compiler->IsNextBlock(false_successor())) {
2586 __ jmp(compiler->GetBlockLabel(true_successor()));
2587 } else if (!value && compiler->IsNextBlock(true_successor())) {
2588 __ jmp(compiler->GetBlockLabel(false_successor()));
2589 }
2590 }
2591
2592
2593 void ControlInstruction::EmitBranchOnCondition(FlowGraphCompiler* compiler,
2594 Condition true_condition) {
2595 if (compiler->IsNextBlock(false_successor())) {
2596 // If the next block is the false successor we will fall through to it.
2597 __ j(true_condition, compiler->GetBlockLabel(true_successor()));
2598 } else {
2599 // If the next block is the true successor we negate comparison and fall
2600 // through to it.
2601 ASSERT(compiler->IsNextBlock(true_successor()));
2602 Condition false_condition = NegateCondition(true_condition);
2603 __ j(false_condition, compiler->GetBlockLabel(false_successor()));
2604 }
2605 }
2606
2607
2608 LocationSummary* CurrentContextInstr::MakeLocationSummary() const {
2609 return LocationSummary::Make(0,
2610 Location::RequiresRegister(),
2611 LocationSummary::kNoCall);
2612 }
2613
2614
2615 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2616 __ MoveRegister(locs()->out().reg(), CTX);
2617 }
2618
2619
2620 LocationSummary* StrictCompareInstr::MakeLocationSummary() const {
2621 const intptr_t kNumInputs = 2;
2622 const intptr_t kNumTemps = 0;
2623 LocationSummary* locs =
2624 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2625 locs->set_in(0, Location::RegisterOrConstant(left()));
2626 locs->set_in(1, Location::RegisterOrConstant(right()));
2627 locs->set_out(Location::RequiresRegister());
2628 return locs;
2629 }
2630
2631
2632 // Special code for numbers (compare values instead of references.)
2633 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2634 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
2635 Location left = locs()->in(0);
2636 Location right = locs()->in(1);
2637 if (left.IsConstant() && right.IsConstant()) {
2638 // TODO(vegorov): should be eliminated earlier by constant propagation.
2639 const bool result = (kind() == Token::kEQ_STRICT) ?
2640 left.constant().raw() == right.constant().raw() :
2641 left.constant().raw() != right.constant().raw();
2642 __ LoadObject(locs()->out().reg(), result ? Bool::True() : Bool::False());
2643 return;
2644 }
2645 if (left.IsConstant()) {
2646 compiler->EmitEqualityRegConstCompare(right.reg(),
2647 left.constant(),
2648 needs_number_check());
2649 } else if (right.IsConstant()) {
2650 compiler->EmitEqualityRegConstCompare(left.reg(),
2651 right.constant(),
2652 needs_number_check());
2653 } else {
2654 compiler->EmitEqualityRegRegCompare(left.reg(),
2655 right.reg(),
2656 needs_number_check());
2657 }
2658
2659 Register result = locs()->out().reg();
2660 Label load_true, done;
2661 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
2662 __ j(true_condition, &load_true, Assembler::kNearJump);
2663 __ LoadObject(result, Bool::False());
2664 __ jmp(&done, Assembler::kNearJump);
2665 __ Bind(&load_true);
2666 __ LoadObject(result, Bool::True());
2667 __ Bind(&done);
2668 }
2669
2670
2671 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
2672 BranchInstr* branch) {
2673 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
2674 Location left = locs()->in(0);
2675 Location right = locs()->in(1);
2676 if (left.IsConstant() && right.IsConstant()) {
2677 // TODO(vegorov): should be eliminated earlier by constant propagation.
2678 const bool result = (kind() == Token::kEQ_STRICT) ?
2679 left.constant().raw() == right.constant().raw() :
2680 left.constant().raw() != right.constant().raw();
2681 branch->EmitBranchOnValue(compiler, result);
2682 return;
2683 }
2684 if (left.IsConstant()) {
2685 compiler->EmitEqualityRegConstCompare(right.reg(),
2686 left.constant(),
2687 needs_number_check());
2688 } else if (right.IsConstant()) {
2689 compiler->EmitEqualityRegConstCompare(left.reg(),
2690 right.constant(),
2691 needs_number_check());
2692 } else {
2693 compiler->EmitEqualityRegRegCompare(left.reg(),
2694 right.reg(),
2695 needs_number_check());
2696 }
2697
2698 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
2699 branch->EmitBranchOnCondition(compiler, true_condition);
2700 }
2701
2702
2703 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2704 // The arguments to the stub include the closure, as does the arguments
2705 // descriptor.
2706 Register temp_reg = locs()->temp(0).reg();
2707 int argument_count = ArgumentCount();
2708 const Array& arguments_descriptor =
2709 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
2710 argument_names()));
2711 __ LoadObject(temp_reg, arguments_descriptor);
2712 compiler->GenerateDartCall(deopt_id(),
2713 token_pos(),
2714 &StubCode::CallClosureFunctionLabel(),
2715 PcDescriptors::kOther,
2716 locs());
2717 __ Drop(argument_count);
2718 }
2719
2720
2721 LocationSummary* BooleanNegateInstr::MakeLocationSummary() const {
2722 return LocationSummary::Make(1,
2723 Location::RequiresRegister(),
2724 LocationSummary::kNoCall);
2725 }
2726
2727
2728 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2729 Register value = locs()->in(0).reg();
2730 Register result = locs()->out().reg();
2731
2732 Label done;
2733 __ LoadObject(result, Bool::True());
2734 __ CompareRegisters(result, value);
2735 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2736 __ LoadObject(result, Bool::False());
2737 __ Bind(&done);
2738 }
2739
2740
2741 LocationSummary* ChainContextInstr::MakeLocationSummary() const {
2742 return LocationSummary::Make(1,
2743 Location::NoLocation(),
2744 LocationSummary::kNoCall);
2745 }
2746
2747
2748 void ChainContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2749 Register context_value = locs()->in(0).reg();
2750
2751 // Chain the new context in context_value to its parent in CTX.
2752 __ StoreIntoObject(context_value,
2753 FieldAddress(context_value, Context::parent_offset()),
2754 CTX);
2755 // Set new context as current context.
2756 __ MoveRegister(CTX, context_value);
2757 }
2758
2759
2760 LocationSummary* StoreVMFieldInstr::MakeLocationSummary() const {
2761 const intptr_t kNumInputs = 2;
2762 const intptr_t kNumTemps = 0;
2763 LocationSummary* locs =
2764 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2765 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
2766 : Location::RequiresRegister());
2767 locs->set_in(1, Location::RequiresRegister());
2768 return locs;
2769 }
2770
2771
2772 void StoreVMFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2773 Register value_reg = locs()->in(0).reg();
2774 Register dest_reg = locs()->in(1).reg();
2775
2776 if (value()->NeedsStoreBuffer()) {
2777 __ StoreIntoObject(dest_reg, FieldAddress(dest_reg, offset_in_bytes()),
2778 value_reg);
2779 } else {
2780 __ StoreIntoObjectNoBarrier(
2781 dest_reg, FieldAddress(dest_reg, offset_in_bytes()), value_reg);
2782 }
2783 }
2784
2785
2786 LocationSummary* AllocateObjectInstr::MakeLocationSummary() const {
2787 return MakeCallSummary();
2788 }
2789
2790
2791 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2792 const Class& cls = Class::ZoneHandle(constructor().Owner());
2793 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls));
2794 const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
2795 compiler->GenerateCall(token_pos(),
2796 &label,
2797 PcDescriptors::kOther,
2798 locs());
2799 __ Drop(ArgumentCount()); // Discard arguments.
2800 }
2801
2802
2803 LocationSummary* CreateClosureInstr::MakeLocationSummary() const {
2804 return MakeCallSummary();
2805 }
2806
2807
2808 void CreateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2809 const Function& closure_function = function();
2810 ASSERT(!closure_function.IsImplicitStaticClosureFunction());
2811 const Code& stub = Code::Handle(
2812 StubCode::GetAllocationStubForClosure(closure_function));
2813 const ExternalLabel label(closure_function.ToCString(), stub.EntryPoint());
2814 compiler->GenerateCall(token_pos(),
2815 &label,
2816 PcDescriptors::kOther,
2817 locs());
2818 __ Drop(2); // Discard type arguments and receiver.
2819 }
2820
2509 } // namespace dart 2821 } // namespace dart
2510 2822
2511 #undef __ 2823 #undef __
2512 2824
2513 #endif // defined TARGET_ARCH_X64 2825 #endif // defined TARGET_ARCH_X64