Chromium Code Reviews

Diff (unified): runtime/vm/flow_graph_compiler_x64.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 6 months ago
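Summary of the change, assembled from lines that appear later in this diff (a sketch, not text from the review itself): calls into stubs previously targeted raw ExternalLabel addresses, and each function's prologue recomputed its pool pointer PC-relatively; under the new calling convention, call targets are Code objects and the callee reads its pool pointer off the Code object the caller provides in CODE_REG.

  // Old: call a raw stub address; the prologue rebuilds PP from RIP.
  __ Call(&stub_code->DeoptimizeLabel(), PP);
  __ leaq(new_pc, Address::AddressRIPRelative(-entry_to_rip_offset));
  __ movq(new_pp, Address(new_pc, -object_pool_pc_dist));

  // New: call a Code handle; the prologue loads PP from CODE_REG.
  __ Call(Code::Handle(stub_code->DeoptimizeCode()), PP);
  __ movq(new_pp, FieldAddress(CODE_REG, Code::object_pool_offset()));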
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
 #if defined(TARGET_ARCH_X64)

 #include "vm/flow_graph_compiler.h"

 #include "vm/ast_printer.h"
(...skipping 172 matching lines...)
 #define __ assem->
   __ Comment("%s", Name());
   __ Bind(entry_label());
   if (FLAG_trap_on_deoptimization) {
     __ int3();
   }

   ASSERT(deopt_env() != NULL);

   StubCode* stub_code = compiler->isolate()->stub_code();
-  __ Call(&stub_code->DeoptimizeLabel(), PP);
+  __ Call(Code::Handle(stub_code->DeoptimizeCode()), PP);
   set_pc_offset(assem->CodeSize());
   __ int3();
 #undef __
 }


 #define __ assembler()->


 // Fall through if bool_register contains null.
(...skipping 20 matching lines...)
     Label* is_not_instance_lbl) {
   const SubtypeTestCache& type_test_cache =
       SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
   StubCode* stub_code = isolate()->stub_code();
   __ LoadObject(temp_reg, type_test_cache, PP);
   __ pushq(temp_reg);  // Subtype test cache.
   __ pushq(instance_reg);  // Instance.
   if (test_kind == kTestTypeOneArg) {
     ASSERT(type_arguments_reg == kNoRegister);
     __ PushObject(Object::null_object(), PP);
-    __ Call(&stub_code->Subtype1TestCacheLabel(), PP);
+    __ Call(Code::Handle(stub_code->Subtype1TestCacheCode()), PP);
   } else if (test_kind == kTestTypeTwoArgs) {
     ASSERT(type_arguments_reg == kNoRegister);
     __ PushObject(Object::null_object(), PP);
-    __ Call(&stub_code->Subtype2TestCacheLabel(), PP);
+    __ Call(Code::Handle(stub_code->Subtype2TestCacheCode()), PP);
   } else if (test_kind == kTestTypeThreeArgs) {
     __ pushq(type_arguments_reg);
-    __ Call(&stub_code->Subtype3TestCacheLabel(), PP);
+    __ Call(Code::Handle(stub_code->Subtype3TestCacheCode()), PP);
   } else {
     UNREACHABLE();
   }
   // Result is in RCX: null -> not found, otherwise Bool::True or Bool::False.
   ASSERT(instance_reg != RCX);
   ASSERT(temp_reg != RCX);
   __ popq(instance_reg);  // Discard.
   __ popq(instance_reg);  // Restore receiver.
   __ popq(temp_reg);  // Discard.
   GenerateBoolToJump(RCX, is_instance_lbl, is_not_instance_lbl);
(...skipping 672 matching lines...)
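GenerateBoolToJump itself is elided by this diff; a minimal sketch of the RCX protocol the comments above describe (null falls through as "not found", otherwise the Bool picks the branch) — treat the exact body as an assumption:

  Label fall_through;
  __ CompareObject(RCX, Object::null_object(), PP);
  __ j(EQUAL, &fall_through, Assembler::kNearJump);  // null -> not found
  __ CompareObject(RCX, Bool::True(), PP);
  __ j(EQUAL, is_instance_lbl);                      // Bool::True
  __ jmp(is_not_instance_lbl);                       // Bool::False
  __ Bind(&fall_through);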
       __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
       __ SmiUntag(RBX);
       // Check that RCX equals RBX, i.e. no named arguments passed.
       __ cmpq(RCX, RBX);
       __ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
     }
   }

   __ Bind(&wrong_num_arguments);
   if (function.IsClosureFunction()) {
-    __ LeaveDartFrame();  // The arguments are still on the stack.
-    __ jmp(&isolate()->stub_code()->CallClosureNoSuchMethodLabel());
+    __ LeaveDartFrame(kKeepCalleePP);  // The arguments are still on the stack.
+    __ Jmp(Code::Handle(
+        isolate()->stub_code()->CallClosureNoSuchMethodCode()), PP);
     // The noSuchMethod call may return to the caller, but not here.
   } else if (check_correct_named_args) {
     __ Stop("Wrong arguments");
   }

   __ Bind(&all_arguments_processed);
   // Nullify originally passed arguments only after they have been copied and
   // checked, otherwise noSuchMethod would not see their original values.
   // This step can be skipped in case we decide that formal parameters are
   // implicitly final, since garbage collecting the unmodified value is not
(...skipping 39 matching lines...)
   __ ret();
 }


 // NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
 // needs to be updated to match.
 void FlowGraphCompiler::EmitFrameEntry() {
   ASSERT(Assembler::EntryPointToPcMarkerOffset() == 0);

   const Function& function = parsed_function().function();
-  const Register new_pp = R13;
-  const Register new_pc = R12;
-
-  // Load PC marker.
-  const intptr_t kRIPRelativeLeaqSize = 7;
-  const intptr_t entry_to_rip_offset = __ CodeSize() + kRIPRelativeLeaqSize;
-  __ leaq(new_pc, Address::AddressRIPRelative(-entry_to_rip_offset));
-  ASSERT(__ CodeSize() == entry_to_rip_offset);
-
-  // Load pool pointer.
-  const intptr_t object_pool_pc_dist =
-      Instructions::HeaderSize() - Instructions::object_pool_offset();
-  __ movq(new_pp, Address(new_pc, -object_pool_pc_dist));

   if (flow_graph().IsCompiledForOsr()) {
     intptr_t extra_slots = StackSize()
         - flow_graph().num_stack_locals()
         - flow_graph().num_copied_params();
     ASSERT(extra_slots >= 0);
-    __ EnterOsrFrame(extra_slots * kWordSize, new_pp, new_pc);
+    __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
+    const Register new_pp = R13;
+    __ movq(new_pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
+
     if (CanOptimizeFunction() &&
         function.IsOptimizable() &&
         (!is_optimizing() || may_reoptimize())) {
       const Register function_reg = RDI;
       // Load function object using the callee's pool pointer.
       __ LoadObject(function_reg, function, new_pp);

       // Patch point is after the eventually inlined function object.
       entry_patch_pc_offset_ = assembler()->CodeSize();

       // Reoptimization of an optimized function is triggered by counting in
       // IC stubs, but not at the entry of the function.
       if (!is_optimizing()) {
         __ incl(FieldAddress(function_reg, Function::usage_counter_offset()));
       }
       __ cmpl(
           FieldAddress(function_reg, Function::usage_counter_offset()),
           Immediate(GetOptimizationThreshold()));
       ASSERT(function_reg == RDI);
       __ J(GREATER_EQUAL,
-           &isolate()->stub_code()->OptimizeFunctionLabel(),
+           Code::Handle(isolate()->stub_code()->OptimizeFunctionCode()),
            new_pp);
     } else {
       entry_patch_pc_offset_ = assembler()->CodeSize();
     }
     ASSERT(StackSize() >= 0);
     __ Comment("Enter frame");
-    __ EnterDartFrameWithInfo(StackSize() * kWordSize, new_pp, new_pc);
+    __ EnterDartFrame(StackSize() * kWordSize, new_pp);
   }
 }


 void FlowGraphCompiler::CompileGraph() {
   InitCompiler();

   TryIntrinsify();

   EmitFrameEntry();
(...skipping 24 matching lines...)
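Restating the counter block in EmitFrameEntry above as plain C++ (illustrative only; the field accesses stand in for the emitted incl/cmpl pair):

  // Unoptimized code counts every entry; optimized code is re-profiled
  // only through IC stubs, so its prologue merely compares.
  if (!is_optimizing()) {
    function.usage_counter++;                          // __ incl(...)
  }
  if (function.usage_counter >= GetOptimizationThreshold()) {
    // enter the OptimizeFunction stub via new_pp      // __ J(GREATER_EQUAL, ...)
  }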
       __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
       __ CompareImmediate(RAX, Immediate(Smi::RawValue(num_fixed_params)), PP);
       __ j(NOT_EQUAL, &wrong_num_arguments, Assembler::kNearJump);
       __ cmpq(RAX,
               FieldAddress(R10,
                            ArgumentsDescriptor::positional_count_offset()));
       __ j(EQUAL, &correct_num_arguments, Assembler::kNearJump);

       __ Bind(&wrong_num_arguments);
       if (function.IsClosureFunction()) {
-        __ LeaveDartFrame();  // The arguments are still on the stack.
-        __ jmp(&stub_code->CallClosureNoSuchMethodLabel());
+        __ LeaveDartFrame(kKeepCalleePP);  // Leave arguments on the stack.
+        __ Jmp(Code::Handle(stub_code->CallClosureNoSuchMethodCode()), PP);
         // The noSuchMethod call may return to the caller, but not here.
       } else {
         __ Stop("Wrong number of arguments");
       }
       __ Bind(&correct_num_arguments);
     }
   } else if (!flow_graph().IsCompiledForOsr()) {
     CopyParameters();
   }
(...skipping 45 matching lines...)
   ASSERT(!block_order().is_empty());
   VisitBlocks();

   __ int3();
   GenerateDeferredCode();
   // Emit function patching code. This will be swapped with the first 13 bytes
   // at entry point.
   patch_code_pc_offset_ = assembler()->CodeSize();
   // This is patched up to a point in FrameEntry where the PP for the
   // current function is in R13 instead of PP.
-  __ JmpPatchable(&stub_code->FixCallersTargetLabel(), R13);
+  __ JmpPatchable(Code::Handle(stub_code->FixCallersTargetCode()), R13);

   if (is_optimizing()) {
     lazy_deopt_pc_offset_ = assembler()->CodeSize();
-    __ Jmp(&stub_code->DeoptimizeLazyLabel(), PP);
+    __ Jmp(Code::Handle(stub_code->DeoptimizeLazyCode()), PP);
   }
 }


 void FlowGraphCompiler::GenerateCall(intptr_t token_pos,
-                                     const ExternalLabel* label,
+                                     const Code& target,
                                      RawPcDescriptors::Kind kind,
                                      LocationSummary* locs) {
-  __ Call(label, PP);
+  __ Call(target, PP);
   AddCurrentDescriptor(kind, Isolate::kNoDeoptId, token_pos);
   RecordSafepoint(locs);
 }


 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                          intptr_t token_pos,
-                                         const ExternalLabel* label,
+                                         const Code& target,
                                          RawPcDescriptors::Kind kind,
                                          LocationSummary* locs) {
-  __ CallPatchable(label);
+  __ CallPatchable(target);
   AddCurrentDescriptor(kind, deopt_id, token_pos);
   RecordSafepoint(locs);
   // Marks either the continuation point in unoptimized code or the
   // deoptimization point in optimized code, after call.
   const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id);
   if (is_optimizing()) {
     AddDeoptIndexAtCall(deopt_id_after, token_pos);
   } else {
     // Add deoptimization continuation point after the call and before the
     // arguments are removed.
(...skipping 25 matching lines...)
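A plausible accounting for the "first 13 bytes" in the patch-code comment above — an assumption based on standard x86-64 encodings, not something this CL states: a patchable jump is a 64-bit immediate move into the scratch register followed by an indirect jump.

  __ movq(TMP, Immediate(target_entry));  // REX.W + B8+rd + imm64 = 10 bytes
                                          // (target_entry is a stand-in name)
  __ jmp(TMP);                            // REX + FF /4           =  3 bytes
                                          //                  total: 13 bytes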
 }


 void FlowGraphCompiler::EmitUnoptimizedStaticCall(
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs,
     const ICData& ic_data) {
   StubCode* stub_code = isolate()->stub_code();
-  const uword label_address =
-      stub_code->UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested());
-  ExternalLabel target_label(label_address);
+  const Code& target = Code::Handle(
+      stub_code->UnoptimizedStaticCallCode(ic_data.NumArgsTested()));
   __ LoadObject(RBX, ic_data, PP);
   GenerateDartCall(deopt_id,
                    token_pos,
-                   &target_label,
+                   target,
                    RawPcDescriptors::kUnoptStaticCall,
                    locs);
   __ Drop(argument_count, RCX);
 }


 void FlowGraphCompiler::EmitEdgeCounter() {
   // We do not check for overflow when incrementing the edge counter. The
   // function should normally be optimized long before the counter can
   // overflow; and though we do not reset the counters when we optimize or
(...skipping 15 matching lines...)
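The increment itself falls in the skipped lines; one shape such an unchecked bump could take (a sketch only — counters_array and edge_id are illustrative names, not this file's):

  __ Comment("Edge counter");
  __ LoadObject(RAX, counters_array, PP);            // hypothetical pool object
  __ addq(FieldAddress(RAX, Array::element_offset(edge_id)),
          Immediate(Smi::RawValue(1)));              // no overflow check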


 int32_t FlowGraphCompiler::EdgeCounterIncrementSizeInBytes() {
   const int32_t size = Isolate::Current()->edge_counter_increment_size();
   ASSERT(size != -1);
   return size;
 }


 void FlowGraphCompiler::EmitOptimizedInstanceCall(
-    ExternalLabel* target_label,
+    const Code& target,
     const ICData& ic_data,
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs) {
   ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
   // Each ICData propagated from unoptimized to optimized code contains the
   // function that corresponds to the Dart function of that IC call. Due
   // to inlining in optimized code, that function may not correspond to the
   // top-level function (parsed_function().function()) which could be
   // reoptimized and which counter needs to be incremented.
   // Pass the function explicitly, it is used in IC stub.
   __ LoadObject(RDI, parsed_function().function(), PP);
   __ LoadObject(RBX, ic_data, PP);
   GenerateDartCall(deopt_id,
                    token_pos,
-                   target_label,
+                   target,
                    RawPcDescriptors::kIcCall,
                    locs);
   __ Drop(argument_count, RCX);
 }


-void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label,
+void FlowGraphCompiler::EmitInstanceCall(const Code& target,
                                          const ICData& ic_data,
                                          intptr_t argument_count,
                                          intptr_t deopt_id,
                                          intptr_t token_pos,
                                          LocationSummary* locs) {
   ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
   __ LoadObject(RBX, ic_data, PP);
   GenerateDartCall(deopt_id,
                    token_pos,
-                   target_label,
+                   target,
                    RawPcDescriptors::kIcCall,
                    locs);
   __ Drop(argument_count, RCX);
 }


 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     const ICData& ic_data,
     intptr_t argument_count,
     intptr_t deopt_id,
(...skipping 42 matching lines...)
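For orientation, the fixed register assignments the call emitters above rely on, as visible in this diff (conventions that predate this CL):

  // RBX - ICData            (instance calls and unoptimized static calls)
  // RDI - caller's Function  (optimized instance calls; read by the IC stub)
  // R10 - ArgumentsDescriptor (static calls)
  // RCX - scratch handed to __ Drop(argument_count, RCX)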
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs) {
   StubCode* stub_code = isolate()->stub_code();
   __ LoadObject(R10, arguments_descriptor, PP);
   // Do not use the code from the function, but let the code be patched so that
   // we can record the outgoing edges to other code.
   GenerateDartCall(deopt_id,
                    token_pos,
-                   &stub_code->CallStaticFunctionLabel(),
+                   Code::Handle(stub_code->CallStaticFunctionCode()),
                    RawPcDescriptors::kOther,
                    locs);
   AddStaticCallTarget(function);
   __ Drop(argument_count, RCX);
 }


 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
     Register reg,
     const Object& obj,
     bool needs_number_check,
     intptr_t token_pos) {
   ASSERT(!needs_number_check ||
          (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));

   if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
     ASSERT(!needs_number_check);
     __ testq(reg, reg);
     return EQUAL;
   }

   if (needs_number_check) {
     StubCode* stub_code = isolate()->stub_code();
     __ pushq(reg);
     __ PushObject(obj, PP);
     if (is_optimizing()) {
-      __ CallPatchable(&stub_code->OptimizedIdenticalWithNumberCheckLabel());
+      __ CallPatchable(Code::Handle(
+          stub_code->OptimizedIdenticalWithNumberCheckCode()));
     } else {
-      __ CallPatchable(&stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
+      __ CallPatchable(Code::Handle(
+          stub_code->UnoptimizedIdenticalWithNumberCheckCode()));
     }
     if (token_pos != Scanner::kNoSourcePos) {
       AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
                            Isolate::kNoDeoptId,
                            token_pos);
     }
     // Stub returns result in flags (result of a cmpq, we need ZF computed).
     __ popq(reg);  // Discard constant.
     __ popq(reg);  // Restore 'reg'.
   } else {
     __ CompareObject(reg, obj, PP);
   }
   return EQUAL;
 }


 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                        Register right,
                                                        bool needs_number_check,
                                                        intptr_t token_pos) {
   if (needs_number_check) {
     StubCode* stub_code = isolate()->stub_code();
     __ pushq(left);
     __ pushq(right);
     if (is_optimizing()) {
-      __ CallPatchable(&stub_code->OptimizedIdenticalWithNumberCheckLabel());
+      __ CallPatchable(Code::Handle(
+          stub_code->OptimizedIdenticalWithNumberCheckCode()));
     } else {
-      __ CallPatchable(&stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
+      __ CallPatchable(Code::Handle(
+          stub_code->UnoptimizedIdenticalWithNumberCheckCode()));
     }
     if (token_pos != Scanner::kNoSourcePos) {
       AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
                            Isolate::kNoDeoptId,
                            token_pos);
     }
     // Stub returns result in flags (result of a cmpq, we need ZF computed).
     __ popq(right);
     __ popq(left);
   } else {
(...skipping 69 matching lines...)
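Both equality helpers return a Condition instead of materializing a Boolean; a hypothetical call site simply branches on the flags the identity stub (or cmpq/CompareObject) left behind:

  Condition true_condition =
      EmitEqualityRegRegCompare(RAX, RDX, needs_number_check, token_pos);
  __ j(true_condition, &is_equal);   // ZF computed by the identity stub
  // fall through: not equal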
     // Jump if receiver is not Smi.
     if (kNumChecks == 1) {
       __ j(NOT_ZERO, failed);
     } else {
       __ j(NOT_ZERO, &after_smi_test);
     }
     // Do not use the code from the function, but let the code be patched so
     // that we can record the outgoing edges to other code.
     GenerateDartCall(deopt_id,
                      token_index,
-                     &stub_code->CallStaticFunctionLabel(),
+                     Code::Handle(stub_code->CallStaticFunctionCode()),
                      RawPcDescriptors::kOther,
                      locs);
     const Function& function = Function::Handle(ic_data.GetTargetAt(0));
     AddStaticCallTarget(function);
     __ Drop(argument_count, RCX);
     if (kNumChecks > 1) {
       __ jmp(match_found);
     }
   } else {
     // Receiver is Smi, but Smi is not a valid class therefore fail.
(...skipping 20 matching lines...)
     __ cmpl(RDI, Immediate(sorted[i].cid));
     if (kIsLastCheck) {
       __ j(NOT_EQUAL, failed);
     } else {
       __ j(NOT_EQUAL, &next_test);
     }
     // Do not use the code from the function, but let the code be patched so
     // that we can record the outgoing edges to other code.
     GenerateDartCall(deopt_id,
                      token_index,
-                     &stub_code->CallStaticFunctionLabel(),
+                     Code::Handle(stub_code->CallStaticFunctionCode()),
                      RawPcDescriptors::kOther,
                      locs);
     const Function& function = *sorted[i].target;
     AddStaticCallTarget(function);
     __ Drop(argument_count, RCX);
     if (!kIsLastCheck) {
       __ jmp(match_found);
     }
     __ Bind(&next_test);
   }
(...skipping 231 matching lines...)
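The checks emitted above amount to a linear class-id dispatch over the sorted ICData entries; restated as pseudo-C++ for readability (illustrative only, not VM source):

  intptr_t cid = receiver_is_smi ? kSmiCid : receiver.GetClassId();
  for (intptr_t i = 0; i < num_checks; i++) {
    if (cid == sorted[i].cid) {
      return CallStatic(sorted[i].target);  // patched call, edge recorded
    }
  }
  Fail();  // no match: branch to 'failed' (deopt or megamorphic path)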
   __ movups(reg, Address(RSP, 0));
   __ AddImmediate(RSP, Immediate(kFpuRegisterSize), PP);
 }


 #undef __

 }  // namespace dart

 #endif  // defined TARGET_ARCH_X64
