Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge, created 4 years, 1 month ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX.
6 6
7 #include "vm/flow_graph_compiler.h" 7 #include "vm/flow_graph_compiler.h"
8 8
9 #include "vm/bit_vector.h" 9 #include "vm/bit_vector.h"
10 #include "vm/cha.h" 10 #include "vm/cha.h"
(...skipping 12 matching lines...)
23 #include "vm/object_store.h" 23 #include "vm/object_store.h"
24 #include "vm/parser.h" 24 #include "vm/parser.h"
25 #include "vm/raw_object.h" 25 #include "vm/raw_object.h"
26 #include "vm/stack_frame.h" 26 #include "vm/stack_frame.h"
27 #include "vm/stub_code.h" 27 #include "vm/stub_code.h"
28 #include "vm/symbols.h" 28 #include "vm/symbols.h"
29 #include "vm/timeline.h" 29 #include "vm/timeline.h"
30 30
31 namespace dart { 31 namespace dart {
32 32
33 DEFINE_FLAG(bool, enable_simd_inline, true, 33 DEFINE_FLAG(bool,
34 "Enable inlining of SIMD related method calls."); 34 enable_simd_inline,
35 DEFINE_FLAG(bool, inline_smi_string_hashcode, true, 35 true,
36 "Enable inlining of SIMD related method calls.");
37 DEFINE_FLAG(
38 bool,
39 inline_smi_string_hashcode,
40 true,
36 "Inline hashcode for Smi and one-byte strings in case of megamorphic call"); 41 "Inline hashcode for Smi and one-byte strings in case of megamorphic call");
37 DEFINE_FLAG(int, inline_smi_string_hashcode_ratio, 50, 42 DEFINE_FLAG(
43 int,
44 inline_smi_string_hashcode_ratio,
45 50,
38 "Minimal hotness (0..100) of one-byte-string before inlining its hashcode"); 46 "Minimal hotness (0..100) of one-byte-string before inlining its hashcode");
39 DEFINE_FLAG(int, min_optimization_counter_threshold, 5000, 47 DEFINE_FLAG(int,
40 "The minimum invocation count for a function."); 48 min_optimization_counter_threshold,
41 DEFINE_FLAG(int, optimization_counter_scale, 2000, 49 5000,
42 "The scale of invocation count, by size of the function."); 50 "The minimum invocation count for a function.");
51 DEFINE_FLAG(int,
52 optimization_counter_scale,
53 2000,
54 "The scale of invocation count, by size of the function.");
43 DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment."); 55 DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
44 DEFINE_FLAG(bool, trace_inlining_intervals, false, 56 DEFINE_FLAG(bool,
45 "Inlining interval diagnostics"); 57 trace_inlining_intervals,
58 false,
59 "Inlining interval diagnostics");
46 60
47 DECLARE_FLAG(bool, code_comments); 61 DECLARE_FLAG(bool, code_comments);
48 DECLARE_FLAG(charp, deoptimize_filter); 62 DECLARE_FLAG(charp, deoptimize_filter);
49 DECLARE_FLAG(bool, intrinsify); 63 DECLARE_FLAG(bool, intrinsify);
50 DECLARE_FLAG(bool, propagate_ic_data); 64 DECLARE_FLAG(bool, propagate_ic_data);
51 DECLARE_FLAG(int, regexp_optimization_counter_threshold); 65 DECLARE_FLAG(int, regexp_optimization_counter_threshold);
52 DECLARE_FLAG(int, reoptimization_counter_threshold); 66 DECLARE_FLAG(int, reoptimization_counter_threshold);
53 DECLARE_FLAG(int, stacktrace_every); 67 DECLARE_FLAG(int, stacktrace_every);
54 DECLARE_FLAG(charp, stacktrace_filter); 68 DECLARE_FLAG(charp, stacktrace_filter);
55 DECLARE_FLAG(bool, trace_compiler); 69 DECLARE_FLAG(bool, trace_compiler);
(...skipping 101 matching lines...)
157 } 171 }
158 } 172 }
159 } 173 }
160 174
161 175
162 // Returns true if OneByteString is a frequent receiver class. We inline 176 // Returns true if OneByteString is a frequent receiver class. We inline
163 // the Smi check as well, since a Smi check must be done anyway. 177 // the Smi check as well, since a Smi check must be done anyway.
164 // TODO(srdjan): Add check and code if Smi class is hot. 178 // TODO(srdjan): Add check and code if Smi class is hot.
165 bool FlowGraphCompiler::ShouldInlineSmiStringHashCode(const ICData& ic_data) { 179 bool FlowGraphCompiler::ShouldInlineSmiStringHashCode(const ICData& ic_data) {
166 if (!FLAG_inline_smi_string_hashcode || 180 if (!FLAG_inline_smi_string_hashcode ||
167 (ic_data.target_name() != Symbols::hashCode().raw())) { 181 (ic_data.target_name() != Symbols::hashCode().raw())) {
168 return false; 182 return false;
169 } 183 }
170 // Precompiled code has no ICData, so optimistically inline it. 184 // Precompiled code has no ICData, so optimistically inline it.
171 if (ic_data.IsNull() || (ic_data.NumberOfChecks() == 0)) { 185 if (ic_data.IsNull() || (ic_data.NumberOfChecks() == 0)) {
172 return true; 186 return true;
173 } 187 }
174 // Check if OneByteString is hot enough. 188 // Check if OneByteString is hot enough.
175 const ICData& ic_data_sorted = 189 const ICData& ic_data_sorted =
176 ICData::Handle(ic_data.AsUnaryClassChecksSortedByCount()); 190 ICData::Handle(ic_data.AsUnaryClassChecksSortedByCount());
177 ASSERT(ic_data_sorted.NumberOfChecks() > 0); 191 ASSERT(ic_data_sorted.NumberOfChecks() > 0);
178 if (ic_data_sorted.GetReceiverClassIdAt(0) == kOneByteStringCid) { 192 if (ic_data_sorted.GetReceiverClassIdAt(0) == kOneByteStringCid) {
179 const intptr_t total_count = ic_data_sorted.AggregateCount(); 193 const intptr_t total_count = ic_data_sorted.AggregateCount();
180 const intptr_t ratio = (ic_data_sorted.GetCountAt(0) * 100) / total_count; 194 const intptr_t ratio = (ic_data_sorted.GetCountAt(0) * 100) / total_count;
181 return ratio > FLAG_inline_smi_string_hashcode_ratio; 195 return ratio > FLAG_inline_smi_string_hashcode_ratio;
182 } 196 }
183 return false; 197 return false;
184 } 198 }
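
A side note on the ratio test above: it is plain integer arithmetic over the sorted ICData counts, compared against the flag whose default of 50 is defined at the top of this file. A minimal standalone sketch (the counts below are made up for illustration):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical receiver counts, sorted so OneByteString comes first.
  const int64_t one_byte_string_count = 120;  // GetCountAt(0) analogue
  const int64_t total_count = 150;            // AggregateCount() analogue
  const int64_t kRatioThreshold = 50;  // FLAG_inline_smi_string_hashcode_ratio
  const int64_t ratio = (one_byte_string_count * 100) / total_count;  // 80
  std::printf("ratio=%lld inline=%d\n", (long long)ratio,
              (int)(ratio > kRatioThreshold));
  return 0;
}

With these counts the receiver class is hot enough (80 > 50), so the hash code would be inlined.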
185 199
186 200
187 FlowGraphCompiler::FlowGraphCompiler( 201 FlowGraphCompiler::FlowGraphCompiler(
188 Assembler* assembler, 202 Assembler* assembler,
189 FlowGraph* flow_graph, 203 FlowGraph* flow_graph,
190 const ParsedFunction& parsed_function, 204 const ParsedFunction& parsed_function,
191 bool is_optimizing, 205 bool is_optimizing,
192 const GrowableArray<const Function*>& inline_id_to_function, 206 const GrowableArray<const Function*>& inline_id_to_function,
193 const GrowableArray<TokenPosition>& inline_id_to_token_pos, 207 const GrowableArray<TokenPosition>& inline_id_to_token_pos,
194 const GrowableArray<intptr_t>& caller_inline_id) 208 const GrowableArray<intptr_t>& caller_inline_id)
195 : thread_(Thread::Current()), 209 : thread_(Thread::Current()),
196 zone_(Thread::Current()->zone()), 210 zone_(Thread::Current()->zone()),
197 assembler_(assembler), 211 assembler_(assembler),
198 parsed_function_(parsed_function), 212 parsed_function_(parsed_function),
199 flow_graph_(*flow_graph), 213 flow_graph_(*flow_graph),
200 block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)), 214 block_order_(*flow_graph->CodegenBlockOrder(is_optimizing)),
201 current_block_(NULL), 215 current_block_(NULL),
202 exception_handlers_list_(NULL), 216 exception_handlers_list_(NULL),
203 pc_descriptors_list_(NULL), 217 pc_descriptors_list_(NULL),
204 stackmap_table_builder_(NULL), 218 stackmap_table_builder_(NULL),
205 code_source_map_builder_(NULL), 219 code_source_map_builder_(NULL),
206 saved_code_size_(0), 220 saved_code_size_(0),
207 block_info_(block_order_.length()), 221 block_info_(block_order_.length()),
208 deopt_infos_(), 222 deopt_infos_(),
209 static_calls_target_table_(), 223 static_calls_target_table_(),
210 is_optimizing_(is_optimizing), 224 is_optimizing_(is_optimizing),
211 may_reoptimize_(false), 225 may_reoptimize_(false),
212 intrinsic_mode_(false), 226 intrinsic_mode_(false),
213 double_class_(Class::ZoneHandle( 227 double_class_(
214 isolate()->object_store()->double_class())), 228 Class::ZoneHandle(isolate()->object_store()->double_class())),
215 mint_class_(Class::ZoneHandle( 229 mint_class_(Class::ZoneHandle(isolate()->object_store()->mint_class())),
216 isolate()->object_store()->mint_class())), 230 float32x4_class_(
217 float32x4_class_(Class::ZoneHandle( 231 Class::ZoneHandle(isolate()->object_store()->float32x4_class())),
218 isolate()->object_store()->float32x4_class())), 232 float64x2_class_(
219 float64x2_class_(Class::ZoneHandle( 233 Class::ZoneHandle(isolate()->object_store()->float64x2_class())),
220 isolate()->object_store()->float64x2_class())), 234 int32x4_class_(
221 int32x4_class_(Class::ZoneHandle( 235 Class::ZoneHandle(isolate()->object_store()->int32x4_class())),
222 isolate()->object_store()->int32x4_class())), 236 list_class_(Class::ZoneHandle(Library::Handle(Library::CoreLibrary())
223 list_class_(Class::ZoneHandle( 237 .LookupClass(Symbols::List()))),
224 Library::Handle(Library::CoreLibrary()). 238 parallel_move_resolver_(this),
225 LookupClass(Symbols::List()))), 239 pending_deoptimization_env_(NULL),
226 parallel_move_resolver_(this), 240 deopt_id_to_ic_data_(NULL),
227 pending_deoptimization_env_(NULL), 241 edge_counters_array_(Array::ZoneHandle()),
228 deopt_id_to_ic_data_(NULL), 242 inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())),
229 edge_counters_array_(Array::ZoneHandle()), 243 inline_id_to_function_(inline_id_to_function),
230 inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())), 244 inline_id_to_token_pos_(inline_id_to_token_pos),
231 inline_id_to_function_(inline_id_to_function), 245 caller_inline_id_(caller_inline_id) {
232 inline_id_to_token_pos_(inline_id_to_token_pos),
233 caller_inline_id_(caller_inline_id) {
234 ASSERT(flow_graph->parsed_function().function().raw() == 246 ASSERT(flow_graph->parsed_function().function().raw() ==
235 parsed_function.function().raw()); 247 parsed_function.function().raw());
236 if (!is_optimizing) { 248 if (!is_optimizing) {
237 const intptr_t len = thread()->deopt_id(); 249 const intptr_t len = thread()->deopt_id();
238 deopt_id_to_ic_data_ = new(zone()) ZoneGrowableArray<const ICData*>(len); 250 deopt_id_to_ic_data_ = new (zone()) ZoneGrowableArray<const ICData*>(len);
239 deopt_id_to_ic_data_->SetLength(len); 251 deopt_id_to_ic_data_->SetLength(len);
240 for (intptr_t i = 0; i < len; i++) { 252 for (intptr_t i = 0; i < len; i++) {
241 (*deopt_id_to_ic_data_)[i] = NULL; 253 (*deopt_id_to_ic_data_)[i] = NULL;
242 } 254 }
243 // TODO(fschneider): Abstract iteration into ICDataArrayIterator. 255 // TODO(fschneider): Abstract iteration into ICDataArrayIterator.
244 const Array& old_saved_ic_data = Array::Handle(zone(), 256 const Array& old_saved_ic_data =
245 flow_graph->function().ic_data_array()); 257 Array::Handle(zone(), flow_graph->function().ic_data_array());
246 const intptr_t saved_len = 258 const intptr_t saved_len =
247 old_saved_ic_data.IsNull() ? 0 : old_saved_ic_data.Length(); 259 old_saved_ic_data.IsNull() ? 0 : old_saved_ic_data.Length();
248 for (intptr_t i = 1; i < saved_len; i++) { 260 for (intptr_t i = 1; i < saved_len; i++) {
249 ICData& ic_data = ICData::ZoneHandle(zone()); 261 ICData& ic_data = ICData::ZoneHandle(zone());
250 ic_data ^= old_saved_ic_data.At(i); 262 ic_data ^= old_saved_ic_data.At(i);
251 (*deopt_id_to_ic_data_)[ic_data.deopt_id()] = &ic_data; 263 (*deopt_id_to_ic_data_)[ic_data.deopt_id()] = &ic_data;
252 } 264 }
253 } 265 }
254 ASSERT(assembler != NULL); 266 ASSERT(assembler != NULL);
255 ASSERT(!list_class_.IsNull()); 267 ASSERT(!list_class_.IsNull());
256 } 268 }
257 269
258 270
259 bool FlowGraphCompiler::IsUnboxedField(const Field& field) { 271 bool FlowGraphCompiler::IsUnboxedField(const Field& field) {
260 bool valid_class = (SupportsUnboxedDoubles() && 272 bool valid_class =
261 (field.guarded_cid() == kDoubleCid)) || 273 (SupportsUnboxedDoubles() && (field.guarded_cid() == kDoubleCid)) ||
262 (SupportsUnboxedSimd128() && 274 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat32x4Cid)) ||
263 (field.guarded_cid() == kFloat32x4Cid)) || 275 (SupportsUnboxedSimd128() && (field.guarded_cid() == kFloat64x2Cid));
264 (SupportsUnboxedSimd128() && 276 return field.is_unboxing_candidate() && !field.is_final() &&
265 (field.guarded_cid() == kFloat64x2Cid)); 277 !field.is_nullable() && valid_class;
266 return field.is_unboxing_candidate()
267 && !field.is_final()
268 && !field.is_nullable()
269 && valid_class;
270 } 278 }
271 279
272 280
273 bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) { 281 bool FlowGraphCompiler::IsPotentialUnboxedField(const Field& field) {
274 return field.is_unboxing_candidate() && 282 return field.is_unboxing_candidate() &&
275 (FlowGraphCompiler::IsUnboxedField(field) || 283 (FlowGraphCompiler::IsUnboxedField(field) ||
276 (!field.is_final() && (field.guarded_cid() == kIllegalCid))); 284 (!field.is_final() && (field.guarded_cid() == kIllegalCid)));
277 } 285 }
278 286
279 287
280 void FlowGraphCompiler::InitCompiler() { 288 void FlowGraphCompiler::InitCompiler() {
281 pc_descriptors_list_ = new(zone()) DescriptorList(64); 289 pc_descriptors_list_ = new (zone()) DescriptorList(64);
282 exception_handlers_list_ = new(zone()) ExceptionHandlerList(); 290 exception_handlers_list_ = new (zone()) ExceptionHandlerList();
283 block_info_.Clear(); 291 block_info_.Clear();
284 // Conservative detection of leaf routines used to remove the stack check 292 // Conservative detection of leaf routines used to remove the stack check
285 // on function entry. 293 // on function entry.
286 bool is_leaf = is_optimizing() && !flow_graph().IsCompiledForOsr(); 294 bool is_leaf = is_optimizing() && !flow_graph().IsCompiledForOsr();
287 // Initialize block info and search optimized (non-OSR) code for calls 295 // Initialize block info and search optimized (non-OSR) code for calls
288 // indicating a non-leaf routine and calls without IC data indicating 296 // indicating a non-leaf routine and calls without IC data indicating
289 // possible reoptimization. 297 // possible reoptimization.
290 for (int i = 0; i < block_order_.length(); ++i) { 298 for (int i = 0; i < block_order_.length(); ++i) {
291 block_info_.Add(new(zone()) BlockInfo()); 299 block_info_.Add(new (zone()) BlockInfo());
292 if (is_optimizing() && !flow_graph().IsCompiledForOsr()) { 300 if (is_optimizing() && !flow_graph().IsCompiledForOsr()) {
293 BlockEntryInstr* entry = block_order_[i]; 301 BlockEntryInstr* entry = block_order_[i];
294 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { 302 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
295 Instruction* current = it.Current(); 303 Instruction* current = it.Current();
296 if (current->IsBranch()) { 304 if (current->IsBranch()) {
297 current = current->AsBranch()->comparison(); 305 current = current->AsBranch()->comparison();
298 } 306 }
299 // In optimized code, ICData is always set in the instructions. 307 // In optimized code, ICData is always set in the instructions.
300 const ICData* ic_data = NULL; 308 const ICData* ic_data = NULL;
301 if (current->IsInstanceCall()) { 309 if (current->IsInstanceCall()) {
302 ic_data = current->AsInstanceCall()->ic_data(); 310 ic_data = current->AsInstanceCall()->ic_data();
303 } 311 }
304 if ((ic_data != NULL) && (ic_data->NumberOfUsedChecks() == 0)) { 312 if ((ic_data != NULL) && (ic_data->NumberOfUsedChecks() == 0)) {
305 may_reoptimize_ = true; 313 may_reoptimize_ = true;
306 } 314 }
307 if (is_leaf && 315 if (is_leaf && !current->IsCheckStackOverflow() &&
308 !current->IsCheckStackOverflow() &&
309 !current->IsParallelMove()) { 316 !current->IsParallelMove()) {
310 // Note that we do not care if the code contains instructions that 317 // Note that we do not care if the code contains instructions that
311 // can deoptimize. 318 // can deoptimize.
312 LocationSummary* locs = current->locs(); 319 LocationSummary* locs = current->locs();
313 if ((locs != NULL) && locs->can_call()) { 320 if ((locs != NULL) && locs->can_call()) {
314 is_leaf = false; 321 is_leaf = false;
315 } 322 }
316 } 323 }
317 } 324 }
318 } 325 }
(...skipping 26 matching lines...)
345 return CanOptimize() && !parsed_function().function().HasBreakpoint(); 352 return CanOptimize() && !parsed_function().function().HasBreakpoint();
346 } 353 }
347 354
348 355
349 bool FlowGraphCompiler::CanOSRFunction() const { 356 bool FlowGraphCompiler::CanOSRFunction() const {
350 return FLAG_use_osr && CanOptimizeFunction() && !is_optimizing(); 357 return FLAG_use_osr && CanOptimizeFunction() && !is_optimizing();
351 } 358 }
352 359
353 360
354 bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const { 361 bool FlowGraphCompiler::ForceSlowPathForStackOverflow() const {
355 if ((FLAG_stacktrace_every > 0) || 362 if ((FLAG_stacktrace_every > 0) || (FLAG_deoptimize_every > 0) ||
356 (FLAG_deoptimize_every > 0) ||
357 (isolate()->reload_every_n_stack_overflow_checks() > 0)) { 363 (isolate()->reload_every_n_stack_overflow_checks() > 0)) {
358 return true; 364 return true;
359 } 365 }
360 if (FLAG_stacktrace_filter != NULL && 366 if (FLAG_stacktrace_filter != NULL &&
361 strstr(parsed_function().function().ToFullyQualifiedCString(), 367 strstr(parsed_function().function().ToFullyQualifiedCString(),
362 FLAG_stacktrace_filter) != NULL) { 368 FLAG_stacktrace_filter) != NULL) {
363 return true; 369 return true;
364 } 370 }
365 if (is_optimizing() && 371 if (is_optimizing() && FLAG_deoptimize_filter != NULL &&
366 FLAG_deoptimize_filter != NULL &&
367 strstr(parsed_function().function().ToFullyQualifiedCString(), 372 strstr(parsed_function().function().ToFullyQualifiedCString(),
368 FLAG_deoptimize_filter) != NULL) { 373 FLAG_deoptimize_filter) != NULL) {
369 return true; 374 return true;
370 } 375 }
371 return false; 376 return false;
372 } 377 }
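
The two filter flags checked above are plain substring matches against the fully qualified function name. A self-contained sketch of that matching (the function and filter names below are hypothetical):

#include <cstdio>
#include <cstring>

int main() {
  // Stand-ins for ToFullyQualifiedCString() and --stacktrace-filter=<value>.
  const char* qualified_name = "package_foo_MyClass_myMethod";
  const char* stacktrace_filter = "MyClass";
  const bool force_slow_path =
      (stacktrace_filter != NULL) &&
      (strstr(qualified_name, stacktrace_filter) != NULL);
  std::printf("force slow path: %d\n", (int)force_slow_path);
  return 0;
}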
373 378
374 379
375 static bool IsEmptyBlock(BlockEntryInstr* block) { 380 static bool IsEmptyBlock(BlockEntryInstr* block) {
376 return !block->IsCatchBlockEntry() && 381 return !block->IsCatchBlockEntry() && !block->HasNonRedundantParallelMove() &&
377 !block->HasNonRedundantParallelMove() &&
378 block->next()->IsGoto() && 382 block->next()->IsGoto() &&
379 !block->next()->AsGoto()->HasNonRedundantParallelMove() && 383 !block->next()->AsGoto()->HasNonRedundantParallelMove() &&
380 !block->IsIndirectEntry(); 384 !block->IsIndirectEntry();
381 } 385 }
382 386
383 387
384 void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) { 388 void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
385 BlockInfo* block_info = block_info_[block->postorder_number()]; 389 BlockInfo* block_info = block_info_[block->postorder_number()];
386 390
387 // Break out of cycles in the control flow graph. 391 // Break out of cycles in the control flow graph.
(...skipping 40 matching lines...)
428 block_info->set_next_nonempty_label(nonempty_label); 432 block_info->set_next_nonempty_label(nonempty_label);
429 } 433 }
430 434
431 435
432 void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) { 436 void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
433 if (!is_optimizing()) { 437 if (!is_optimizing()) {
434 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) { 438 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) {
435 // Instructions that can be deoptimization targets need to record kDeopt 439 // Instructions that can be deoptimization targets need to record kDeopt
436 // PcDescriptor corresponding to their deopt id. GotoInstr records its 440 // PcDescriptor corresponding to their deopt id. GotoInstr records its
437 // own so that it can control the placement. 441 // own so that it can control the placement.
438 AddCurrentDescriptor(RawPcDescriptors::kDeopt, 442 AddCurrentDescriptor(RawPcDescriptors::kDeopt, instr->deopt_id(),
439 instr->deopt_id(),
440 instr->token_pos()); 443 instr->token_pos());
441 } 444 }
442 AllocateRegistersLocally(instr); 445 AllocateRegistersLocally(instr);
443 } else if (instr->MayThrow() && 446 } else if (instr->MayThrow() &&
444 (CurrentTryIndex() != CatchClauseNode::kInvalidTryIndex)) { 447 (CurrentTryIndex() != CatchClauseNode::kInvalidTryIndex)) {
445 // Optimized try-block: Sync locals to fixed stack locations. 448 // Optimized try-block: Sync locals to fixed stack locations.
446 EmitTrySync(instr, CurrentTryIndex()); 449 EmitTrySync(instr, CurrentTryIndex());
447 } 450 }
448 } 451 }
449 452
450 453
451
452 void FlowGraphCompiler::EmitSourceLine(Instruction* instr) { 454 void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
453 if (!instr->token_pos().IsReal() || (instr->env() == NULL)) { 455 if (!instr->token_pos().IsReal() || (instr->env() == NULL)) {
454 return; 456 return;
455 } 457 }
456 const Script& script = 458 const Script& script =
457 Script::Handle(zone(), instr->env()->function().script()); 459 Script::Handle(zone(), instr->env()->function().script());
458 intptr_t line_nr; 460 intptr_t line_nr;
459 intptr_t column_nr; 461 intptr_t column_nr;
460 script.GetTokenLocation(instr->token_pos(), &line_nr, &column_nr); 462 script.GetTokenLocation(instr->token_pos(), &line_nr, &column_nr);
461 const String& line = String::Handle(zone(), script.GetLine(line_nr)); 463 const String& line = String::Handle(zone(), script.GetLine(line_nr));
462 assembler()->Comment("Line %" Pd " in '%s':\n %s", 464 assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr,
463 line_nr, 465 instr->env()->function().ToFullyQualifiedCString(),
464 instr->env()->function().ToFullyQualifiedCString(), 466 line.ToCString());
465 line.ToCString());
466 } 467 }
467 468
468 469
469 static void LoopInfoComment( 470 static void LoopInfoComment(
470 Assembler* assembler, 471 Assembler* assembler,
471 const BlockEntryInstr& block, 472 const BlockEntryInstr& block,
472 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers) { 473 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers) {
473 if (Assembler::EmittingComments()) { 474 if (Assembler::EmittingComments()) {
474 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { 475 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) {
475 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); 476 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info());
476 !loop_it.Done(); 477 !loop_it.Done(); loop_it.Advance()) {
477 loop_it.Advance()) {
478 if (loop_it.Current() == block.preorder_number()) { 478 if (loop_it.Current() == block.preorder_number()) {
479 assembler->Comment(" Loop %" Pd "", loop_id); 479 assembler->Comment(" Loop %" Pd "", loop_id);
480 } 480 }
481 } 481 }
482 } 482 }
483 } 483 }
484 } 484 }
485 485
486 486
487 // We collect intervals while generating code. 487 // We collect intervals while generating code.
488 struct IntervalStruct { 488 struct IntervalStruct {
489 // 'start' is the pc-offset where the inlined code started. 489 // 'start' is the pc-offset where the inlined code started.
490 // 'pos' is the token position where the inlined call occurred. 490 // 'pos' is the token position where the inlined call occurred.
491 intptr_t start; 491 intptr_t start;
492 TokenPosition pos; 492 TokenPosition pos;
493 intptr_t inlining_id; 493 intptr_t inlining_id;
494 IntervalStruct(intptr_t s, TokenPosition tp, intptr_t id) 494 IntervalStruct(intptr_t s, TokenPosition tp, intptr_t id)
495 : start(s), pos(tp), inlining_id(id) {} 495 : start(s), pos(tp), inlining_id(id) {}
496 void Dump() { 496 void Dump() {
497 THR_Print("start: 0x%" Px " iid: %" Pd " pos: %s", 497 THR_Print("start: 0x%" Px " iid: %" Pd " pos: %s", start, inlining_id,
498 start, inlining_id, pos.ToCString()); 498 pos.ToCString());
499 } 499 }
500 }; 500 };
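
For orientation, these records are later flattened into an Array with Code::kInlIntNumEntries slots per interval, filling the kInlIntStart and kInlIntInliningId slots (see the SetAt calls in VisitBlocks below). A rough standalone mirror of that encoding, using stand-in constants and types:

#include <cstddef>
#include <cstdint>
#include <vector>

struct IntervalSketch {
  intptr_t start;        // pc-offset where the inlined code started
  intptr_t inlining_id;  // index into the inline-id tables
};

// Stand-ins for Code::kInlIntNumEntries / kInlIntStart / kInlIntInliningId;
// the real constants may reserve more slots per entry.
static const size_t kNumEntries = 2, kStart = 0, kInliningId = 1;

std::vector<intptr_t> Flatten(const std::vector<IntervalSketch>& intervals) {
  std::vector<intptr_t> flat(intervals.size() * kNumEntries);
  for (size_t i = 0; i < intervals.size(); ++i) {
    flat[i * kNumEntries + kStart] = intervals[i].start;
    flat[i * kNumEntries + kInliningId] = intervals[i].inlining_id;
  }
  return flat;
}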
501 501
502 502
503 void FlowGraphCompiler::VisitBlocks() { 503 void FlowGraphCompiler::VisitBlocks() {
504 CompactBlocks(); 504 CompactBlocks();
505 const ZoneGrowableArray<BlockEntryInstr*>* loop_headers = NULL; 505 const ZoneGrowableArray<BlockEntryInstr*>* loop_headers = NULL;
506 if (Assembler::EmittingComments()) { 506 if (Assembler::EmittingComments()) {
507 // 'loop_headers' were cleared, recompute. 507 // 'loop_headers' were cleared, recompute.
508 loop_headers = flow_graph().ComputeLoops(); 508 loop_headers = flow_graph().ComputeLoops();
(...skipping 46 matching lines...)
555 } else { 555 } else {
556 // We will add this token position later when generating the 556 // We will add this token position later when generating the
557 // profile. 557 // profile.
558 prev_inlining_pos = TokenPosition::kNoSource; 558 prev_inlining_pos = TokenPosition::kNoSource;
559 } 559 }
560 if (prev_inlining_id > max_inlining_id) { 560 if (prev_inlining_id > max_inlining_id) {
561 max_inlining_id = prev_inlining_id; 561 max_inlining_id = prev_inlining_id;
562 } 562 }
563 } 563 }
564 } 564 }
565 if (FLAG_code_comments || 565 if (FLAG_code_comments || FLAG_disassemble ||
566 FLAG_disassemble || FLAG_disassemble_optimized) { 566 FLAG_disassemble_optimized) {
567 if (FLAG_source_lines) { 567 if (FLAG_source_lines) {
568 EmitSourceLine(instr); 568 EmitSourceLine(instr);
569 } 569 }
570 EmitComment(instr); 570 EmitComment(instr);
571 } 571 }
572 if (instr->IsParallelMove()) { 572 if (instr->IsParallelMove()) {
573 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); 573 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove());
574 } else { 574 } else {
575 BeginCodeSourceRange(); 575 BeginCodeSourceRange();
576 EmitInstructionPrologue(instr); 576 EmitInstructionPrologue(instr);
(...skipping 25 matching lines...)
602 Array::New(intervals.length() * Code::kInlIntNumEntries, Heap::kOld); 602 Array::New(intervals.length() * Code::kInlIntNumEntries, Heap::kOld);
603 Smi& start_h = Smi::Handle(); 603 Smi& start_h = Smi::Handle();
604 Smi& caller_inline_id = Smi::Handle(); 604 Smi& caller_inline_id = Smi::Handle();
605 Smi& inline_id = Smi::Handle(); 605 Smi& inline_id = Smi::Handle();
606 for (intptr_t i = 0; i < intervals.length(); i++) { 606 for (intptr_t i = 0; i < intervals.length(); i++) {
607 if (FLAG_trace_inlining_intervals && is_optimizing()) { 607 if (FLAG_trace_inlining_intervals && is_optimizing()) {
608 const Function& function = 608 const Function& function =
609 *inline_id_to_function_.At(intervals[i].inlining_id); 609 *inline_id_to_function_.At(intervals[i].inlining_id);
610 intervals[i].Dump(); 610 intervals[i].Dump();
611 THR_Print(" parent iid %" Pd " %s\n", 611 THR_Print(" parent iid %" Pd " %s\n",
612 caller_inline_id_[intervals[i].inlining_id], 612 caller_inline_id_[intervals[i].inlining_id],
613 function.ToQualifiedCString()); 613 function.ToQualifiedCString());
614 } 614 }
615 615
616 const intptr_t id = intervals[i].inlining_id; 616 const intptr_t id = intervals[i].inlining_id;
617 start_h = Smi::New(intervals[i].start); 617 start_h = Smi::New(intervals[i].start);
618 inline_id = Smi::New(id); 618 inline_id = Smi::New(id);
619 caller_inline_id = Smi::New(caller_inline_id_[intervals[i].inlining_id]); 619 caller_inline_id = Smi::New(caller_inline_id_[intervals[i].inlining_id]);
620 620
621 const intptr_t p = i * Code::kInlIntNumEntries; 621 const intptr_t p = i * Code::kInlIntNumEntries;
622 inlined_code_intervals_.SetAt(p + Code::kInlIntStart, start_h); 622 inlined_code_intervals_.SetAt(p + Code::kInlIntStart, start_h);
623 inlined_code_intervals_.SetAt(p + Code::kInlIntInliningId, inline_id); 623 inlined_code_intervals_.SetAt(p + Code::kInlIntInliningId, inline_id);
624 } 624 }
625 } 625 }
626 set_current_block(NULL); 626 set_current_block(NULL);
627 if (FLAG_trace_inlining_intervals && is_optimizing()) { 627 if (FLAG_trace_inlining_intervals && is_optimizing()) {
628 LogBlock lb; 628 LogBlock lb;
629 THR_Print("Intervals:\n"); 629 THR_Print("Intervals:\n");
630 for (intptr_t cc = 0; cc < caller_inline_id_.length(); cc++) { 630 for (intptr_t cc = 0; cc < caller_inline_id_.length(); cc++) {
631 THR_Print(" iid: %" Pd " caller iid: %" Pd "\n", 631 THR_Print(" iid: %" Pd " caller iid: %" Pd "\n", cc,
632 cc, caller_inline_id_[cc]); 632 caller_inline_id_[cc]);
633 } 633 }
634 Smi& temp = Smi::Handle(); 634 Smi& temp = Smi::Handle();
635 for (intptr_t i = 0; i < inlined_code_intervals_.Length(); 635 for (intptr_t i = 0; i < inlined_code_intervals_.Length();
636 i += Code::kInlIntNumEntries) { 636 i += Code::kInlIntNumEntries) {
637 temp ^= inlined_code_intervals_.At(i + Code::kInlIntStart); 637 temp ^= inlined_code_intervals_.At(i + Code::kInlIntStart);
638 ASSERT(!temp.IsNull()); 638 ASSERT(!temp.IsNull());
639 THR_Print("% " Pd " start: 0x%" Px " ", i, temp.Value()); 639 THR_Print("% " Pd " start: 0x%" Px " ", i, temp.Value());
640 temp ^= inlined_code_intervals_.At(i + Code::kInlIntInliningId); 640 temp ^= inlined_code_intervals_.At(i + Code::kInlIntInliningId);
641 THR_Print("iid: %" Pd " ", temp.Value()); 641 THR_Print("iid: %" Pd " ", temp.Value());
642 } 642 }
(...skipping 14 matching lines...)
657 const GrowableArray<Definition*>* idefs = catch_block->initial_definitions(); 657 const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
658 658
659 // Construct a ParallelMove instruction for parameters and locals. Skip the 659 // Construct a ParallelMove instruction for parameters and locals. Skip the
660 // special locals exception_var and stacktrace_var since they will be filled 660 // special locals exception_var and stacktrace_var since they will be filled
661 // when an exception is thrown. Constant locations are known to be the same 661 // when an exception is thrown. Constant locations are known to be the same
662 // at all instructions that may throw, and do not need to be materialized. 662 // at all instructions that may throw, and do not need to be materialized.
663 663
664 // Parameters first. 664 // Parameters first.
665 intptr_t i = 0; 665 intptr_t i = 0;
666 const intptr_t num_non_copied_params = flow_graph().num_non_copied_params(); 666 const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
667 ParallelMoveInstr* move_instr = new(zone()) ParallelMoveInstr(); 667 ParallelMoveInstr* move_instr = new (zone()) ParallelMoveInstr();
668 for (; i < num_non_copied_params; ++i) { 668 for (; i < num_non_copied_params; ++i) {
669 // Don't sync captured parameters. They are not in the environment. 669 // Don't sync captured parameters. They are not in the environment.
670 if (flow_graph().captured_parameters()->Contains(i)) continue; 670 if (flow_graph().captured_parameters()->Contains(i)) continue;
671 if ((*idefs)[i]->IsConstant()) continue; // Common constants 671 if ((*idefs)[i]->IsConstant()) continue; // Common constants
672 Location src = env->LocationAt(i); 672 Location src = env->LocationAt(i);
673 #if defined(TARGET_ARCH_DBC) 673 #if defined(TARGET_ARCH_DBC)
674 intptr_t dest_index = kNumberOfCpuRegisters - 1 - i; 674 intptr_t dest_index = kNumberOfCpuRegisters - 1 - i;
675 Location dest = Location::RegisterLocation(dest_index); 675 Location dest = Location::RegisterLocation(dest_index);
676 // Update safepoint bitmap to indicate that the target location 676 // Update safepoint bitmap to indicate that the target location
677 // now contains a pointer. With DBC parameters are copied into 677 // now contains a pointer. With DBC parameters are copied into
(...skipping 32 matching lines...)
710 } 710 }
711 parallel_move_resolver()->EmitNativeCode(move_instr); 711 parallel_move_resolver()->EmitNativeCode(move_instr);
712 } 712 }
713 713
714 714
715 intptr_t FlowGraphCompiler::StackSize() const { 715 intptr_t FlowGraphCompiler::StackSize() const {
716 if (is_optimizing_) { 716 if (is_optimizing_) {
717 return flow_graph_.graph_entry()->spill_slot_count(); 717 return flow_graph_.graph_entry()->spill_slot_count();
718 } else { 718 } else {
719 return parsed_function_.num_stack_locals() + 719 return parsed_function_.num_stack_locals() +
720 parsed_function_.num_copied_params(); 720 parsed_function_.num_copied_params();
721 } 721 }
722 } 722 }
723 723
724 724
725 Label* FlowGraphCompiler::GetJumpLabel( 725 Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const {
726 BlockEntryInstr* block_entry) const {
727 const intptr_t block_index = block_entry->postorder_number(); 726 const intptr_t block_index = block_entry->postorder_number();
728 return block_info_[block_index]->jump_label(); 727 return block_info_[block_index]->jump_label();
729 } 728 }
730 729
731 730
732 bool FlowGraphCompiler::WasCompacted( 731 bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const {
733 BlockEntryInstr* block_entry) const {
734 const intptr_t block_index = block_entry->postorder_number(); 732 const intptr_t block_index = block_entry->postorder_number();
735 return block_info_[block_index]->WasCompacted(); 733 return block_info_[block_index]->WasCompacted();
736 } 734 }
737 735
738 736
739 Label* FlowGraphCompiler::NextNonEmptyLabel() const { 737 Label* FlowGraphCompiler::NextNonEmptyLabel() const {
740 const intptr_t current_index = current_block()->postorder_number(); 738 const intptr_t current_index = current_block()->postorder_number();
741 return block_info_[current_index]->next_nonempty_label(); 739 return block_info_[current_index]->next_nonempty_label();
742 } 740 }
743 741
744 742
745 bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const { 743 bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const {
746 return NextNonEmptyLabel() == GetJumpLabel(block_entry); 744 return NextNonEmptyLabel() == GetJumpLabel(block_entry);
747 } 745 }
748 746
749 747
750 BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const { 748 BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
751 Label* true_label = GetJumpLabel(branch->true_successor()); 749 Label* true_label = GetJumpLabel(branch->true_successor());
752 Label* false_label = GetJumpLabel(branch->false_successor()); 750 Label* false_label = GetJumpLabel(branch->false_successor());
753 Label* fall_through = NextNonEmptyLabel(); 751 Label* fall_through = NextNonEmptyLabel();
754 BranchLabels result = { true_label, false_label, fall_through }; 752 BranchLabels result = {true_label, false_label, fall_through};
755 return result; 753 return result;
756 } 754 }
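
A hedged sketch of how a backend consumes these labels: the conditional jump always targets true_label, and the unconditional jump to false_label can be skipped whenever the false successor is the fall-through block (stand-in types below; the real per-architecture emitters use the VM Assembler):

#include <cstdio>

struct LabelSketch { const char* name; };
struct BranchLabelsSketch {
  LabelSketch* true_label;
  LabelSketch* false_label;
  LabelSketch* fall_through;
};

void EmitBranchSketch(const BranchLabelsSketch& labels) {
  std::printf("  jcc %s\n", labels.true_label->name);  // conditional jump
  if (labels.false_label != labels.fall_through) {
    // The false target is not next in code order, so jump explicitly.
    std::printf("  jmp %s\n", labels.false_label->name);
  }
}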
757 755
758 756
759 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) { 757 void FlowGraphCompiler::AddSlowPathCode(SlowPathCode* code) {
760 slow_path_code_.Add(code); 758 slow_path_code_.Add(code);
761 } 759 }
762 760
763 761
764 void FlowGraphCompiler::GenerateDeferredCode() { 762 void FlowGraphCompiler::GenerateDeferredCode() {
765 for (intptr_t i = 0; i < slow_path_code_.length(); i++) { 763 for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
766 BeginCodeSourceRange(); 764 BeginCodeSourceRange();
767 slow_path_code_[i]->GenerateCode(this); 765 slow_path_code_[i]->GenerateCode(this);
768 EndCodeSourceRange(TokenPosition::kDeferredSlowPath); 766 EndCodeSourceRange(TokenPosition::kDeferredSlowPath);
769 } 767 }
770 for (intptr_t i = 0; i < deopt_infos_.length(); i++) { 768 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
771 BeginCodeSourceRange(); 769 BeginCodeSourceRange();
772 deopt_infos_[i]->GenerateCode(this, i); 770 deopt_infos_[i]->GenerateCode(this, i);
773 EndCodeSourceRange(TokenPosition::kDeferredDeoptInfo); 771 EndCodeSourceRange(TokenPosition::kDeferredDeoptInfo);
774 } 772 }
775 } 773 }
776 774
777 775
778 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index, 776 void FlowGraphCompiler::AddExceptionHandler(intptr_t try_index,
779 intptr_t outer_try_index, 777 intptr_t outer_try_index,
780 intptr_t pc_offset, 778 intptr_t pc_offset,
781 const Array& handler_types, 779 const Array& handler_types,
782 bool needs_stacktrace) { 780 bool needs_stacktrace) {
783 exception_handlers_list_->AddHandler(try_index, 781 exception_handlers_list_->AddHandler(try_index, outer_try_index, pc_offset,
784 outer_try_index, 782 handler_types, needs_stacktrace);
785 pc_offset,
786 handler_types,
787 needs_stacktrace);
788 } 783 }
789 784
790 785
791 void FlowGraphCompiler::SetNeedsStacktrace(intptr_t try_index) { 786 void FlowGraphCompiler::SetNeedsStacktrace(intptr_t try_index) {
792 exception_handlers_list_->SetNeedsStacktrace(try_index); 787 exception_handlers_list_->SetNeedsStacktrace(try_index);
793 } 788 }
794 789
795 790
796 // Uses current pc position and try-index. 791 // Uses current pc position and try-index.
797 void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind, 792 void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
798 intptr_t deopt_id, 793 intptr_t deopt_id,
799 TokenPosition token_pos) { 794 TokenPosition token_pos) {
800 // When running with optimizations disabled, don't emit deopt-descriptors. 795 // When running with optimizations disabled, don't emit deopt-descriptors.
801 if (!CanOptimize() && (kind == RawPcDescriptors::kDeopt)) return; 796 if (!CanOptimize() && (kind == RawPcDescriptors::kDeopt)) return;
802 pc_descriptors_list()->AddDescriptor(kind, 797 pc_descriptors_list()->AddDescriptor(kind, assembler()->CodeSize(), deopt_id,
803 assembler()->CodeSize(), 798 token_pos, CurrentTryIndex());
804 deopt_id,
805 token_pos,
806 CurrentTryIndex());
807 } 799 }
808 800
809 801
810 void FlowGraphCompiler::AddStaticCallTarget(const Function& func) { 802 void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
811 ASSERT(func.IsZoneHandle()); 803 ASSERT(func.IsZoneHandle());
812 static_calls_target_table_.Add( 804 static_calls_target_table_.Add(
813 new(zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL)); 805 new (zone()) StaticCallsStruct(assembler()->CodeSize(), &func, NULL));
814 } 806 }
815 807
816 808
817 void FlowGraphCompiler::AddStubCallTarget(const Code& code) { 809 void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
818 ASSERT(code.IsZoneHandle()); 810 ASSERT(code.IsZoneHandle());
819 static_calls_target_table_.Add( 811 static_calls_target_table_.Add(
820 new(zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code)); 812 new (zone()) StaticCallsStruct(assembler()->CodeSize(), NULL, &code));
821 } 813 }
822 814
823 815
824 void FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) { 816 void FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
825 ASSERT(is_optimizing()); 817 ASSERT(is_optimizing());
826 ASSERT(!intrinsic_mode()); 818 ASSERT(!intrinsic_mode());
827 CompilerDeoptInfo* info = 819 CompilerDeoptInfo* info =
828 new(zone()) CompilerDeoptInfo(deopt_id, 820 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall,
829 ICData::kDeoptAtCall, 821 0, // No flags.
830 0, // No flags. 822 pending_deoptimization_env_);
831 pending_deoptimization_env_);
832 info->set_pc_offset(assembler()->CodeSize()); 823 info->set_pc_offset(assembler()->CodeSize());
833 deopt_infos_.Add(info); 824 deopt_infos_.Add(info);
834 } 825 }
835 826
836 827
837 // This function must be in sync with FlowGraphCompiler::SaveLiveRegisters 828 // This function must be in sync with FlowGraphCompiler::SaveLiveRegisters
838 // and FlowGraphCompiler::SlowPathEnvironmentFor. 829 // and FlowGraphCompiler::SlowPathEnvironmentFor.
839 // See StackFrame::VisitObjectPointers for the details of how stack map is 830 // See StackFrame::VisitObjectPointers for the details of how stack map is
840 // interpreted. 831 // interpreted.
841 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs, 832 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
842 intptr_t slow_path_argument_count) { 833 intptr_t slow_path_argument_count) {
843 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) { 834 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
844 const intptr_t spill_area_size = is_optimizing() ? 835 const intptr_t spill_area_size =
845 flow_graph_.graph_entry()->spill_slot_count() : 0; 836 is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
846 837
847 RegisterSet* registers = locs->live_registers(); 838 RegisterSet* registers = locs->live_registers();
848 ASSERT(registers != NULL); 839 ASSERT(registers != NULL);
849 const intptr_t kFpuRegisterSpillFactor = 840 const intptr_t kFpuRegisterSpillFactor = kFpuRegisterSize / kWordSize;
850 kFpuRegisterSize / kWordSize; 841 const intptr_t live_registers_size =
851 const intptr_t live_registers_size = registers->CpuRegisterCount() + 842 registers->CpuRegisterCount() +
852 (registers->FpuRegisterCount() * kFpuRegisterSpillFactor); 843 (registers->FpuRegisterCount() * kFpuRegisterSpillFactor);
853 844
854 BitmapBuilder* bitmap = locs->stack_bitmap(); 845 BitmapBuilder* bitmap = locs->stack_bitmap();
855 846
856 // An instruction may have two safepoints in deferred code. The 847 // An instruction may have two safepoints in deferred code. The
857 // call to RecordSafepoint has the side-effect of appending the live 848 // call to RecordSafepoint has the side-effect of appending the live
858 // registers to the bitmap. This is why the second call to RecordSafepoint 849 // registers to the bitmap. This is why the second call to RecordSafepoint
859 // with the same instruction (and same location summary) sees a bitmap that 850 // with the same instruction (and same location summary) sees a bitmap that
860 // is larger than StackSize(). It will never be larger than StackSize() + 851 // is larger than StackSize(). It will never be larger than StackSize() +
861 // live_registers_size. 852 // live_registers_size.
862 // The first safepoint will grow the bitmap to be the size of 853 // The first safepoint will grow the bitmap to be the size of
863 // spill_area_size but the second safepoint will truncate the bitmap and 854 // spill_area_size but the second safepoint will truncate the bitmap and
864 // append the live registers to it again. The bitmap produced by both calls 855 // append the live registers to it again. The bitmap produced by both calls
865 // will be the same. 856 // will be the same.
866 #if !defined(TARGET_ARCH_DBC) 857 #if !defined(TARGET_ARCH_DBC)
867 ASSERT(bitmap->Length() <= (spill_area_size + live_registers_size)); 858 ASSERT(bitmap->Length() <= (spill_area_size + live_registers_size));
868 bitmap->SetLength(spill_area_size); 859 bitmap->SetLength(spill_area_size);
869 #else 860 #else
870 if (bitmap->Length() <= (spill_area_size + live_registers_size)) { 861 if (bitmap->Length() <= (spill_area_size + live_registers_size)) {
871 bitmap->SetLength(Utils::Maximum(bitmap->Length(), spill_area_size)); 862 bitmap->SetLength(Utils::Maximum(bitmap->Length(), spill_area_size));
872 } 863 }
873 #endif 864 #endif
874 865
875 // Mark the bits in the stack map in the same order we push registers in 866 // Mark the bits in the stack map in the same order we push registers in
(...skipping 30 matching lines...)
906 } 897 }
907 898
908 // Arguments pushed on top of live registers in the slow path are tagged. 899 // Arguments pushed on top of live registers in the slow path are tagged.
909 for (intptr_t i = 0; i < slow_path_argument_count; ++i) { 900 for (intptr_t i = 0; i < slow_path_argument_count; ++i) {
910 bitmap->Set(bitmap->Length(), true); 901 bitmap->Set(bitmap->Length(), true);
911 } 902 }
912 903
913 // The slow path area outside the spill area contains live registers 904 // The slow path area outside the spill area contains live registers
914 // and pushed arguments for calls inside the slow path. 905 // and pushed arguments for calls inside the slow path.
915 intptr_t slow_path_bit_count = bitmap->Length() - spill_area_size; 906 intptr_t slow_path_bit_count = bitmap->Length() - spill_area_size;
916 stackmap_table_builder()->AddEntry(assembler()->CodeSize(), 907 stackmap_table_builder()->AddEntry(assembler()->CodeSize(), bitmap,
917 bitmap,
918 slow_path_bit_count); 908 slow_path_bit_count);
919 } 909 }
920 } 910 }
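
To make the size bound described in the comment above concrete, a back-of-the-envelope computation (kFpuRegisterSize = 16 and kWordSize = 8 are assumed 64-bit-target values; the register counts are made up):

#include <cstdio>

int main() {
  const int kFpuRegisterSize = 16, kWordSize = 8;  // assumed target values
  const int spill_area_size = 10;        // spill_slot_count() analogue
  const int cpu_regs = 3, fpu_regs = 2;  // live registers at the safepoint
  const int kFpuRegisterSpillFactor = kFpuRegisterSize / kWordSize;  // 2
  const int live_registers_size =
      cpu_regs + fpu_regs * kFpuRegisterSpillFactor;  // 3 + 2 * 2 = 7
  // The bitmap may grow up to spill_area_size + live_registers_size bits.
  std::printf("max bitmap length = %d\n",
              spill_area_size + live_registers_size);  // 17
  return 0;
}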
921 911
922 912
923 // This function must be kept in sync with: 913 // This function must be kept in sync with:
924 // 914 //
925 // FlowGraphCompiler::RecordSafepoint 915 // FlowGraphCompiler::RecordSafepoint
926 // FlowGraphCompiler::SaveLiveRegisters 916 // FlowGraphCompiler::SaveLiveRegisters
927 // MaterializeObjectInstr::RemapRegisters 917 // MaterializeObjectInstr::RemapRegisters
(...skipping 34 matching lines...)
962 } else { 952 } else {
963 cpu_reg_slots[i] = -1; 953 cpu_reg_slots[i] = -1;
964 } 954 }
965 } 955 }
966 956
967 // 2. Iterate the environment and replace register locations with the 957 // 2. Iterate the environment and replace register locations with the
968 // corresponding spill slot locations. 958 // corresponding spill slot locations.
969 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) { 959 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
970 Location loc = it.CurrentLocation(); 960 Location loc = it.CurrentLocation();
971 Value* value = it.CurrentValue(); 961 Value* value = it.CurrentValue();
972 it.SetCurrentLocation(loc.RemapForSlowPath( 962 it.SetCurrentLocation(loc.RemapForSlowPath(value->definition(),
973 value->definition(), cpu_reg_slots, fpu_reg_slots)); 963 cpu_reg_slots, fpu_reg_slots));
974 } 964 }
975 965
976 return env; 966 return env;
977 } 967 }
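
Step 2 above rewrites each environment location through the register-to-spill-slot maps built in step 1. A tiny stand-in of that remapping for plain CPU registers (hypothetical types; the real logic lives in Location::RemapForSlowPath and also handles FPU registers and pairs):

#include <vector>

// Stand-in location: either a register index or a stack slot index.
struct LocationSketch {
  bool is_register;
  int index;
};

LocationSketch RemapSketch(const LocationSketch& loc,
                           const std::vector<int>& cpu_reg_slots) {
  if (loc.is_register && cpu_reg_slots[loc.index] >= 0) {
    // The register was saved by the slow path; point at its spill slot.
    LocationSketch remapped = {false, cpu_reg_slots[loc.index]};
    return remapped;
  }
  return loc;  // not a live saved register: leave the location unchanged
}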
978 968
979 969
980 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, 970 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
981 ICData::DeoptReasonId reason, 971 ICData::DeoptReasonId reason,
982 uint32_t flags) { 972 uint32_t flags) {
983 if (intrinsic_mode()) { 973 if (intrinsic_mode()) {
984 return &intrinsic_slow_path_label_; 974 return &intrinsic_slow_path_label_;
985 } 975 }
986 976
987 // No deoptimization allowed when 'FLAG_precompiled_mode' is set. 977 // No deoptimization allowed when 'FLAG_precompiled_mode' is set.
988 if (FLAG_precompiled_mode) { 978 if (FLAG_precompiled_mode) {
989 if (FLAG_trace_compiler) { 979 if (FLAG_trace_compiler) {
990 THR_Print( 980 THR_Print(
991 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n", 981 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
992 parsed_function_.function().ToFullyQualifiedCString(), deopt_id); 982 parsed_function_.function().ToFullyQualifiedCString(), deopt_id);
993 } 983 }
994 ASSERT(deopt_id != 0); // longjmp must return non-zero value. 984 ASSERT(deopt_id != 0); // longjmp must return non-zero value.
995 Thread::Current()->long_jump_base()->Jump( 985 Thread::Current()->long_jump_base()->Jump(
996 deopt_id, Object::speculative_inlining_error()); 986 deopt_id, Object::speculative_inlining_error());
997 } 987 }
998 988
999 ASSERT(is_optimizing_); 989 ASSERT(is_optimizing_);
1000 CompilerDeoptInfoWithStub* stub = 990 CompilerDeoptInfoWithStub* stub = new (zone()) CompilerDeoptInfoWithStub(
1001 new(zone()) CompilerDeoptInfoWithStub(deopt_id, 991 deopt_id, reason, flags, pending_deoptimization_env_);
1002 reason,
1003 flags,
1004 pending_deoptimization_env_);
1005 deopt_infos_.Add(stub); 992 deopt_infos_.Add(stub);
1006 return stub->entry_label(); 993 return stub->entry_label();
1007 } 994 }
1008 995
1009 996
1010 #if defined(TARGET_ARCH_DBC) 997 #if defined(TARGET_ARCH_DBC)
1011 void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id, 998 void FlowGraphCompiler::EmitDeopt(intptr_t deopt_id,
1012 ICData::DeoptReasonId reason, 999 ICData::DeoptReasonId reason,
1013 uint32_t flags) { 1000 uint32_t flags) {
1014 ASSERT(is_optimizing()); 1001 ASSERT(is_optimizing());
1015 ASSERT(!intrinsic_mode()); 1002 ASSERT(!intrinsic_mode());
1016 // The pending deoptimization environment may be changed after this deopt is 1003 // The pending deoptimization environment may be changed after this deopt is
1017 // emitted, so we need to make a copy. 1004 // emitted, so we need to make a copy.
1018 Environment* env_copy = 1005 Environment* env_copy = pending_deoptimization_env_->DeepCopy(zone());
1019 pending_deoptimization_env_->DeepCopy(zone());
1020 CompilerDeoptInfo* info = 1006 CompilerDeoptInfo* info =
1021 new(zone()) CompilerDeoptInfo(deopt_id, 1007 new (zone()) CompilerDeoptInfo(deopt_id, reason, flags, env_copy);
1022 reason,
1023 flags,
1024 env_copy);
1025 deopt_infos_.Add(info); 1008 deopt_infos_.Add(info);
1026 assembler()->Deopt(0, /*is_eager =*/ 1); 1009 assembler()->Deopt(0, /*is_eager =*/1);
1027 info->set_pc_offset(assembler()->CodeSize()); 1010 info->set_pc_offset(assembler()->CodeSize());
1028 } 1011 }
1029 #endif // defined(TARGET_ARCH_DBC) 1012 #endif // defined(TARGET_ARCH_DBC)
1030 1013
1031 1014
1032 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { 1015 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
1033 ASSERT(exception_handlers_list_ != NULL); 1016 ASSERT(exception_handlers_list_ != NULL);
1034 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( 1017 const ExceptionHandlers& handlers = ExceptionHandlers::Handle(
1035 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart())); 1018 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart()));
1036 code.set_exception_handlers(handlers); 1019 code.set_exception_handlers(handlers);
1037 if (FLAG_compiler_stats) { 1020 if (FLAG_compiler_stats) {
1038 Thread* thread = Thread::Current(); 1021 Thread* thread = Thread::Current();
1039 INC_STAT(thread, total_code_size, 1022 INC_STAT(thread, total_code_size,
1040 ExceptionHandlers::InstanceSize(handlers.num_entries())); 1023 ExceptionHandlers::InstanceSize(handlers.num_entries()));
1041 INC_STAT(thread, total_code_size, handlers.num_entries() * sizeof(uword)); 1024 INC_STAT(thread, total_code_size, handlers.num_entries() * sizeof(uword));
1042 } 1025 }
1043 } 1026 }
1044 1027
1045 1028
1046 void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) { 1029 void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
1047 ASSERT(pc_descriptors_list_ != NULL); 1030 ASSERT(pc_descriptors_list_ != NULL);
1048 const PcDescriptors& descriptors = PcDescriptors::Handle( 1031 const PcDescriptors& descriptors = PcDescriptors::Handle(
1049 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart())); 1032 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
1050 if (!is_optimizing_) descriptors.Verify(parsed_function_.function()); 1033 if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
(...skipping 19 matching lines...)
1070 } else { 1053 } else {
1071 const Array& array = 1054 const Array& array =
1072 Array::Handle(Array::New(deopt_info_table_size, Heap::kOld)); 1055 Array::Handle(Array::New(deopt_info_table_size, Heap::kOld));
1073 Smi& offset = Smi::Handle(); 1056 Smi& offset = Smi::Handle();
1074 TypedData& info = TypedData::Handle(); 1057 TypedData& info = TypedData::Handle();
1075 Smi& reason_and_flags = Smi::Handle(); 1058 Smi& reason_and_flags = Smi::Handle();
1076 for (intptr_t i = 0; i < deopt_infos_.length(); i++) { 1059 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
1077 offset = Smi::New(deopt_infos_[i]->pc_offset()); 1060 offset = Smi::New(deopt_infos_[i]->pc_offset());
1078 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array); 1061 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
1079 reason_and_flags = DeoptTable::EncodeReasonAndFlags( 1062 reason_and_flags = DeoptTable::EncodeReasonAndFlags(
1080 deopt_infos_[i]->reason(), 1063 deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
1081 deopt_infos_[i]->flags());
1082 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags); 1064 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
1083 } 1065 }
1084 return array.raw(); 1066 return array.raw();
1085 } 1067 }
1086 } 1068 }
1087 1069
1088 1070
1089 void FlowGraphCompiler::FinalizeStackmaps(const Code& code) { 1071 void FlowGraphCompiler::FinalizeStackmaps(const Code& code) {
1090 if (stackmap_table_builder_ == NULL) { 1072 if (stackmap_table_builder_ == NULL) {
1091 code.set_stackmaps(Object::null_array()); 1073 code.set_stackmaps(Object::null_array());
(...skipping 27 matching lines...)
1119 info.end_pos = TokenPosition::kMinSource; 1101 info.end_pos = TokenPosition::kMinSource;
1120 info.set_index(parsed_function().current_context_var()->index()); 1102 info.set_index(parsed_function().current_context_var()->index());
1121 var_descs.SetVar(0, Symbols::CurrentContextVar(), &info); 1103 var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
1122 } 1104 }
1123 code.set_var_descriptors(var_descs); 1105 code.set_var_descriptors(var_descs);
1124 } 1106 }
1125 1107
1126 1108
1127 void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) { 1109 void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) {
1128 ASSERT(code.static_calls_target_table() == Array::null()); 1110 ASSERT(code.static_calls_target_table() == Array::null());
1129 const Array& targets = Array::Handle(zone(), Array::New( 1111 const Array& targets =
1130 (static_calls_target_table_.length() * Code::kSCallTableEntryLength), 1112 Array::Handle(zone(), Array::New((static_calls_target_table_.length() *
1131 Heap::kOld)); 1113 Code::kSCallTableEntryLength),
1114 Heap::kOld));
1132 Smi& smi_offset = Smi::Handle(zone()); 1115 Smi& smi_offset = Smi::Handle(zone());
1133 for (intptr_t i = 0; i < static_calls_target_table_.length(); i++) { 1116 for (intptr_t i = 0; i < static_calls_target_table_.length(); i++) {
1134 const intptr_t target_ix = Code::kSCallTableEntryLength * i; 1117 const intptr_t target_ix = Code::kSCallTableEntryLength * i;
1135 smi_offset = Smi::New(static_calls_target_table_[i]->offset); 1118 smi_offset = Smi::New(static_calls_target_table_[i]->offset);
1136 targets.SetAt(target_ix + Code::kSCallTableOffsetEntry, smi_offset); 1119 targets.SetAt(target_ix + Code::kSCallTableOffsetEntry, smi_offset);
1137 if (static_calls_target_table_[i]->function != NULL) { 1120 if (static_calls_target_table_[i]->function != NULL) {
1138 targets.SetAt(target_ix + Code::kSCallTableFunctionEntry, 1121 targets.SetAt(target_ix + Code::kSCallTableFunctionEntry,
1139 *static_calls_target_table_[i]->function); 1122 *static_calls_target_table_[i]->function);
1140 } 1123 }
1141 if (static_calls_target_table_[i]->code != NULL) { 1124 if (static_calls_target_table_[i]->code != NULL) {
1142 targets.SetAt(target_ix + Code::kSCallTableCodeEntry, 1125 targets.SetAt(target_ix + Code::kSCallTableCodeEntry,
1143 *static_calls_target_table_[i]->code); 1126 *static_calls_target_table_[i]->code);
1144 } 1127 }
1145 } 1128 }
1146 code.set_static_calls_target_table(targets); 1129 code.set_static_calls_target_table(targets);
1147 INC_STAT(Thread::Current(), 1130 INC_STAT(Thread::Current(), total_code_size,
1148 total_code_size,
1149 targets.Length() * sizeof(uword)); 1131 targets.Length() * sizeof(uword));
1150 } 1132 }
1151 1133
1152 1134
1153 // Returns 'true' if regular code generation should be skipped. 1135 // Returns 'true' if regular code generation should be skipped.
1154 bool FlowGraphCompiler::TryIntrinsify() { 1136 bool FlowGraphCompiler::TryIntrinsify() {
1155 // Intrinsification skips argument checks, therefore disable if in checked 1137 // Intrinsification skips argument checks, therefore disable if in checked
1156 // mode. 1138 // mode.
1157 if (FLAG_intrinsify && !isolate()->type_checks()) { 1139 if (FLAG_intrinsify && !isolate()->type_checks()) {
1158 const Class& owner = Class::Handle(parsed_function().function().Owner()); 1140 const Class& owner = Class::Handle(parsed_function().function().Owner());
(...skipping 42 matching lines...)
1201 // before any deoptimization point. 1183 // before any deoptimization point.
1202 ASSERT(!intrinsic_slow_path_label_.IsBound()); 1184 ASSERT(!intrinsic_slow_path_label_.IsBound());
1203 assembler()->Bind(&intrinsic_slow_path_label_); 1185 assembler()->Bind(&intrinsic_slow_path_label_);
1204 return complete; 1186 return complete;
1205 } 1187 }
1206 1188
1207 1189
1208 // DBC is very different from other architectures in how it performs instance 1190 // DBC is very different from other architectures in how it performs instance
1209 // and static calls because it does not use stubs. 1191 // and static calls because it does not use stubs.
1210 #if !defined(TARGET_ARCH_DBC) 1192 #if !defined(TARGET_ARCH_DBC)
1211 void FlowGraphCompiler::GenerateInstanceCall( 1193 void FlowGraphCompiler::GenerateInstanceCall(intptr_t deopt_id,
1212 intptr_t deopt_id, 1194 TokenPosition token_pos,
1213 TokenPosition token_pos, 1195 intptr_t argument_count,
1214 intptr_t argument_count, 1196 LocationSummary* locs,
1215 LocationSummary* locs, 1197 const ICData& ic_data_in) {
1216 const ICData& ic_data_in) {
1217 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original()); 1198 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1218 if (FLAG_precompiled_mode) { 1199 if (FLAG_precompiled_mode) {
1219 ic_data = ic_data.AsUnaryClassChecks(); 1200 ic_data = ic_data.AsUnaryClassChecks();
1220 EmitSwitchableInstanceCall(ic_data, argument_count, 1201 EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos,
1221 deopt_id, token_pos, locs); 1202 locs);
1222 return; 1203 return;
1223 } 1204 }
1224 ASSERT(!ic_data.IsNull()); 1205 ASSERT(!ic_data.IsNull());
1225 if (is_optimizing() && (ic_data_in.NumberOfUsedChecks() == 0)) { 1206 if (is_optimizing() && (ic_data_in.NumberOfUsedChecks() == 0)) {
1226 // Emit IC call that will count and thus may need reoptimization at 1207 // Emit IC call that will count and thus may need reoptimization at
1227 // function entry. 1208 // function entry.
1228 ASSERT(may_reoptimize() || flow_graph().IsCompiledForOsr()); 1209 ASSERT(may_reoptimize() || flow_graph().IsCompiledForOsr());
1229 switch (ic_data.NumArgsTested()) { 1210 switch (ic_data.NumArgsTested()) {
1230 case 1: 1211 case 1:
1231 EmitOptimizedInstanceCall( 1212 EmitOptimizedInstanceCall(
1232 *StubCode::OneArgOptimizedCheckInlineCache_entry(), ic_data, 1213 *StubCode::OneArgOptimizedCheckInlineCache_entry(), ic_data,
1233 argument_count, deopt_id, token_pos, locs); 1214 argument_count, deopt_id, token_pos, locs);
1234 return; 1215 return;
1235 case 2: 1216 case 2:
1236 EmitOptimizedInstanceCall( 1217 EmitOptimizedInstanceCall(
1237 *StubCode::TwoArgsOptimizedCheckInlineCache_entry(), ic_data, 1218 *StubCode::TwoArgsOptimizedCheckInlineCache_entry(), ic_data,
1238 argument_count, deopt_id, token_pos, locs); 1219 argument_count, deopt_id, token_pos, locs);
1239 return; 1220 return;
1240 default: 1221 default:
1241 UNIMPLEMENTED(); 1222 UNIMPLEMENTED();
1242 } 1223 }
1243 return; 1224 return;
1244 } 1225 }
1245 1226
1246 if (is_optimizing()) { 1227 if (is_optimizing()) {
1247 EmitMegamorphicInstanceCall(ic_data_in, argument_count, 1228 EmitMegamorphicInstanceCall(ic_data_in, argument_count, deopt_id, token_pos,
1248 deopt_id, token_pos, locs, 1229 locs, CatchClauseNode::kInvalidTryIndex);
1249 CatchClauseNode::kInvalidTryIndex);
1250 return; 1230 return;
1251 } 1231 }
1252 1232
1253 switch (ic_data.NumArgsTested()) { 1233 switch (ic_data.NumArgsTested()) {
1254 case 1: 1234 case 1:
1255 EmitInstanceCall( 1235 EmitInstanceCall(*StubCode::OneArgCheckInlineCache_entry(), ic_data,
1256 *StubCode::OneArgCheckInlineCache_entry(), ic_data, argument_count, 1236 argument_count, deopt_id, token_pos, locs);
1257 deopt_id, token_pos, locs);
1258 break; 1237 break;
1259 case 2: 1238 case 2:
1260 EmitInstanceCall( 1239 EmitInstanceCall(*StubCode::TwoArgsCheckInlineCache_entry(), ic_data,
1261 *StubCode::TwoArgsCheckInlineCache_entry(), ic_data, argument_count, 1240 argument_count, deopt_id, token_pos, locs);
1262 deopt_id, token_pos, locs);
1263 break; 1241 break;
1264 default: 1242 default:
1265 UNIMPLEMENTED(); 1243 UNIMPLEMENTED();
1266 } 1244 }
1267 } 1245 }
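The branching in GenerateInstanceCall above picks one of four call strategies. A minimal sketch of that decision as a pure function (standalone C++, not VM code; the enumerator names are descriptive, not the VM's):

    #include <cstdint>

    enum class InstanceCallKind {
      kSwitchable,    // FLAG_precompiled_mode: AOT switchable call
      kCountingIC,    // optimizing, zero used checks: counting IC call
      kMegamorphic,   // optimizing with IC data: megamorphic call
      kInlineCacheIC  // unoptimized: plain 1- or 2-argument IC stub
    };

    InstanceCallKind SelectInstanceCallKind(bool precompiled_mode,
                                            bool is_optimizing,
                                            intptr_t used_checks) {
      if (precompiled_mode) return InstanceCallKind::kSwitchable;
      if (is_optimizing && (used_checks == 0)) {
        return InstanceCallKind::kCountingIC;  // may trigger reoptimization
      }
      if (is_optimizing) return InstanceCallKind::kMegamorphic;
      return InstanceCallKind::kInlineCacheIC;
    }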
1268 1246
1269 1247
1270 void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id, 1248 void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
1271 TokenPosition token_pos, 1249 TokenPosition token_pos,
1272 const Function& function, 1250 const Function& function,
1273 intptr_t argument_count, 1251 intptr_t argument_count,
1274 const Array& argument_names, 1252 const Array& argument_names,
1275 LocationSummary* locs, 1253 LocationSummary* locs,
1276 const ICData& ic_data_in) { 1254 const ICData& ic_data_in) {
1277 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original()); 1255 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1278 const Array& arguments_descriptor = Array::ZoneHandle( 1256 const Array& arguments_descriptor =
1279 ic_data.IsNull() ? ArgumentsDescriptor::New(argument_count, 1257 Array::ZoneHandle(ic_data.IsNull() ? ArgumentsDescriptor::New(
1280 argument_names) 1258 argument_count, argument_names)
1281 : ic_data.arguments_descriptor()); 1259 : ic_data.arguments_descriptor());
1282 if (is_optimizing()) { 1260 if (is_optimizing()) {
1283 EmitOptimizedStaticCall(function, arguments_descriptor, 1261 EmitOptimizedStaticCall(function, arguments_descriptor, argument_count,
1284 argument_count, deopt_id, token_pos, locs); 1262 deopt_id, token_pos, locs);
1285 } else { 1263 } else {
1286 ICData& call_ic_data = ICData::ZoneHandle(ic_data.raw()); 1264 ICData& call_ic_data = ICData::ZoneHandle(ic_data.raw());
1287 if (call_ic_data.IsNull()) { 1265 if (call_ic_data.IsNull()) {
1288 const intptr_t kNumArgsChecked = 0; 1266 const intptr_t kNumArgsChecked = 0;
1289 call_ic_data = GetOrAddStaticCallICData(deopt_id, 1267 call_ic_data =
1290 function, 1268 GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
1291 arguments_descriptor, 1269 kNumArgsChecked)
1292 kNumArgsChecked)->raw(); 1270 ->raw();
1293 } 1271 }
1294 EmitUnoptimizedStaticCall(argument_count, deopt_id, token_pos, locs, 1272 EmitUnoptimizedStaticCall(argument_count, deopt_id, token_pos, locs,
1295 call_ic_data); 1273 call_ic_data);
1296 } 1274 }
1297 } 1275 }
1298 1276
1299 1277
1300 void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg, 1278 void FlowGraphCompiler::GenerateNumberTypeCheck(Register kClassIdReg,
1301 const AbstractType& type, 1279 const AbstractType& type,
1302 Label* is_instance_lbl, 1280 Label* is_instance_lbl,
(...skipping 51 matching lines...)
1354 assembler()->Comment("%s", buffer); 1332 assembler()->Comment("%s", buffer);
1355 #endif 1333 #endif
1356 } 1334 }
1357 1335
1358 1336
1359 #if !defined(TARGET_ARCH_DBC) 1337 #if !defined(TARGET_ARCH_DBC)
1360 // TODO(vegorov) enable edge-counters on DBC if we consider them beneficial. 1338 // TODO(vegorov) enable edge-counters on DBC if we consider them beneficial.
1361 bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) { 1339 bool FlowGraphCompiler::NeedsEdgeCounter(TargetEntryInstr* block) {
1362 // Only emit an edge counter if there is no goto at the end of the block, 1340 // Only emit an edge counter if there is no goto at the end of the block,
1363 // except for the entry block. 1341 // except for the entry block.
1364 return (FLAG_reorder_basic_blocks 1342 return (FLAG_reorder_basic_blocks &&
1365 && (!block->last_instruction()->IsGoto() 1343 (!block->last_instruction()->IsGoto() ||
1366 || (block == flow_graph().graph_entry()->normal_entry()))); 1344 (block == flow_graph().graph_entry()->normal_entry())));
1367 } 1345 }
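The predicate above reduces to a small pure function; a sketch (standalone, not VM code):

    // Mirrors NeedsEdgeCounter: count an edge only when basic-block
    // reordering is enabled, and only for blocks that do not end in a
    // goto -- except the normal entry block, which always gets one.
    bool NeedsEdgeCounter(bool reorder_basic_blocks,
                          bool ends_in_goto,
                          bool is_normal_entry) {
      return reorder_basic_blocks && (!ends_in_goto || is_normal_entry);
    }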
1368 1346
1369 1347
1370 // Allocate a register that is not explicitly blocked. 1348 // Allocate a register that is not explicitly blocked.
1371 static Register AllocateFreeRegister(bool* blocked_registers) { 1349 static Register AllocateFreeRegister(bool* blocked_registers) {
1372 for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) { 1350 for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) {
1373 if (!blocked_registers[regno]) { 1351 if (!blocked_registers[regno]) {
1374 blocked_registers[regno] = true; 1352 blocked_registers[regno] = true;
1375 return static_cast<Register>(regno); 1353 return static_cast<Register>(regno);
1376 } 1354 }
1377 } 1355 }
1378 UNREACHABLE(); 1356 UNREACHABLE();
1379 return kNoRegister; 1357 return kNoRegister;
1380 } 1358 }
1381 #endif 1359 #endif
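AllocateFreeRegister above does a first-fit scan over a bool array. The same strategy over a bitmask, as a standalone sketch (kNumRegs is an assumption for illustration, not the VM's register count):

    #include <cstdint>

    constexpr int kNumRegs = 16;  // assumption, not kNumberOfCpuRegisters

    // Returns the lowest unblocked register index and marks it blocked,
    // or -1 if all are taken (the VM hits UNREACHABLE() in that case).
    int AllocateFirstFree(uint32_t* blocked_mask) {
      for (int regno = 0; regno < kNumRegs; regno++) {
        const uint32_t bit = 1u << regno;
        if ((*blocked_mask & bit) == 0) {
          *blocked_mask |= bit;
          return regno;
        }
      }
      return -1;
    }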
1382 1360
1383 1361
1384 void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) { 1362 void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
1385 ASSERT(!is_optimizing()); 1363 ASSERT(!is_optimizing());
1386 instr->InitializeLocationSummary(zone(), 1364 instr->InitializeLocationSummary(zone(), false); // Not optimizing.
1387 false); // Not optimizing.
1388 1365
1389 // No need to allocate registers based on LocationSummary on DBC since in 1366 // No need to allocate registers based on LocationSummary on DBC since in
1390 // unoptimized mode it's a stack-based bytecode, just like the IR itself. 1367 // unoptimized mode it's a stack-based bytecode, just like the IR itself.
1391 #if !defined(TARGET_ARCH_DBC) 1368 #if !defined(TARGET_ARCH_DBC)
1392 LocationSummary* locs = instr->locs(); 1369 LocationSummary* locs = instr->locs();
1393 1370
1394 bool blocked_registers[kNumberOfCpuRegisters]; 1371 bool blocked_registers[kNumberOfCpuRegisters];
1395 1372
1396 // Block all registers globally reserved by the assembler, etc., and mark 1373 // Block all registers globally reserved by the assembler, etc., and mark
1397 // the rest as free. 1374 // the rest as free.
1398 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { 1375 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1399 blocked_registers[i] = (kDartAvailableCpuRegs & (1 << i)) == 0; 1376 blocked_registers[i] = (kDartAvailableCpuRegs & (1 << i)) == 0;
1400 } 1377 }
(...skipping 45 matching lines...)
1446 if (should_pop) { 1423 if (should_pop) {
1447 assembler()->PopRegister(reg); 1424 assembler()->PopRegister(reg);
1448 } 1425 }
1449 } 1426 }
1450 1427
1451 // Allocate all unallocated temp locations. 1428 // Allocate all unallocated temp locations.
1452 for (intptr_t i = 0; i < locs->temp_count(); i++) { 1429 for (intptr_t i = 0; i < locs->temp_count(); i++) {
1453 Location loc = locs->temp(i); 1430 Location loc = locs->temp(i);
1454 if (loc.IsUnallocated()) { 1431 if (loc.IsUnallocated()) {
1455 ASSERT(loc.policy() == Location::kRequiresRegister); 1432 ASSERT(loc.policy() == Location::kRequiresRegister);
1456 loc = Location::RegisterLocation( 1433 loc = Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
1457 AllocateFreeRegister(blocked_registers));
1458 locs->set_temp(i, loc); 1434 locs->set_temp(i, loc);
1459 } 1435 }
1460 } 1436 }
1461 1437
1462 Location result_location = locs->out(0); 1438 Location result_location = locs->out(0);
1463 if (result_location.IsUnallocated()) { 1439 if (result_location.IsUnallocated()) {
1464 switch (result_location.policy()) { 1440 switch (result_location.policy()) {
1465 case Location::kAny: 1441 case Location::kAny:
1466 case Location::kPrefersRegister: 1442 case Location::kPrefersRegister:
1467 case Location::kRequiresRegister: 1443 case Location::kRequiresRegister:
1468 case Location::kWritableRegister: 1444 case Location::kWritableRegister:
1469 result_location = Location::RegisterLocation( 1445 result_location =
1470 AllocateFreeRegister(blocked_registers)); 1446 Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
1471 break; 1447 break;
1472 case Location::kSameAsFirstInput: 1448 case Location::kSameAsFirstInput:
1473 result_location = locs->in(0); 1449 result_location = locs->in(0);
1474 break; 1450 break;
1475 case Location::kRequiresFpuRegister: 1451 case Location::kRequiresFpuRegister:
1476 UNREACHABLE(); 1452 UNREACHABLE();
1477 break; 1453 break;
1478 } 1454 }
1479 locs->set_out(0, result_location); 1455 locs->set_out(0, result_location);
1480 } 1456 }
(...skipping 166 matching lines...)
1647 } 1623 }
1648 } else { 1624 } else {
1649 *spilled = false; 1625 *spilled = false;
1650 } 1626 }
1651 1627
1652 return scratch; 1628 return scratch;
1653 } 1629 }
1654 1630
1655 1631
1656 ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope( 1632 ParallelMoveResolver::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
1657 ParallelMoveResolver* resolver, FpuRegister blocked) 1633 ParallelMoveResolver* resolver,
1658 : resolver_(resolver), 1634 FpuRegister blocked)
1659 reg_(kNoFpuRegister), 1635 : resolver_(resolver), reg_(kNoFpuRegister), spilled_(false) {
1660 spilled_(false) {
1661 COMPILE_ASSERT(FpuTMP != kNoFpuRegister); 1636 COMPILE_ASSERT(FpuTMP != kNoFpuRegister);
1662 uword blocked_mask = ((blocked != kNoFpuRegister) ? 1 << blocked : 0) 1637 uword blocked_mask =
1663 | 1 << FpuTMP; 1638 ((blocked != kNoFpuRegister) ? 1 << blocked : 0) | 1 << FpuTMP;
1664 reg_ = static_cast<FpuRegister>( 1639 reg_ = static_cast<FpuRegister>(resolver_->AllocateScratchRegister(
1665 resolver_->AllocateScratchRegister(Location::kFpuRegister, 1640 Location::kFpuRegister, blocked_mask, 0, kNumberOfFpuRegisters - 1,
1666 blocked_mask, 1641 &spilled_));
1667 0,
1668 kNumberOfFpuRegisters - 1,
1669 &spilled_));
1670 1642
1671 if (spilled_) { 1643 if (spilled_) {
1672 resolver->SpillFpuScratch(reg_); 1644 resolver->SpillFpuScratch(reg_);
1673 } 1645 }
1674 } 1646 }
1675 1647
1676 1648
1677 ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() { 1649 ParallelMoveResolver::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() {
1678 if (spilled_) { 1650 if (spilled_) {
1679 resolver_->RestoreFpuScratch(reg_); 1651 resolver_->RestoreFpuScratch(reg_);
1680 } 1652 }
1681 } 1653 }
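Both Scratch*RegisterScope classes (the FPU one above and the CPU one below) follow the same RAII shape: pick a scratch register outside a blocked mask, spill it if nothing is free, and restore it when the scope dies. A standalone sketch of that pattern, not VM code; the allocate/spill/restore hooks are stand-ins for the resolver's methods:

    #include <functional>
    #include <utility>

    class ScratchScope {
     public:
      // |allocate| returns a register index and sets |spilled| when the
      // chosen register held a live value that had to be saved first.
      ScratchScope(std::function<int(bool* spilled)> allocate,
                   std::function<void(int)> spill,
                   std::function<void(int)> restore)
          : restore_(std::move(restore)), spilled_(false) {
        reg_ = allocate(&spilled_);
        if (spilled_) spill(reg_);
      }

      ~ScratchScope() {
        if (spilled_) restore_(reg_);  // undo the spill on scope exit
      }

      int reg() const { return reg_; }

     private:
      std::function<void(int)> restore_;
      int reg_;
      bool spilled_;
    };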
1682 1654
1683 1655
1684 ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope( 1656 ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
1685 ParallelMoveResolver* resolver, Register blocked) 1657 ParallelMoveResolver* resolver,
1686 : resolver_(resolver), 1658 Register blocked)
1687 reg_(kNoRegister), 1659 : resolver_(resolver), reg_(kNoRegister), spilled_(false) {
1688 spilled_(false) {
1689 uword blocked_mask = RegMaskBit(blocked) | kReservedCpuRegisters; 1660 uword blocked_mask = RegMaskBit(blocked) | kReservedCpuRegisters;
1690 if (resolver->compiler_->intrinsic_mode()) { 1661 if (resolver->compiler_->intrinsic_mode()) {
1691 // Block additional registers that must be preserved for intrinsics. 1662 // Block additional registers that must be preserved for intrinsics.
1692 blocked_mask |= RegMaskBit(ARGS_DESC_REG); 1663 blocked_mask |= RegMaskBit(ARGS_DESC_REG);
1693 #if !defined(TARGET_ARCH_IA32) 1664 #if !defined(TARGET_ARCH_IA32)
1694 // Need to preserve CODE_REG to be able to store the PC marker 1665 // Need to preserve CODE_REG to be able to store the PC marker
1695 // and load the pool pointer. 1666 // and load the pool pointer.
1696 blocked_mask |= RegMaskBit(CODE_REG); 1667 blocked_mask |= RegMaskBit(CODE_REG);
1697 #endif 1668 #endif
1698 } 1669 }
1699 reg_ = static_cast<Register>( 1670 reg_ = static_cast<Register>(
1700 resolver_->AllocateScratchRegister(Location::kRegister, 1671 resolver_->AllocateScratchRegister(Location::kRegister, blocked_mask, 0,
1701 blocked_mask, 1672 kNumberOfCpuRegisters - 1, &spilled_));
1702 0,
1703 kNumberOfCpuRegisters - 1,
1704 &spilled_));
1705 1673
1706 if (spilled_) { 1674 if (spilled_) {
1707 resolver->SpillScratch(reg_); 1675 resolver->SpillScratch(reg_);
1708 } 1676 }
1709 } 1677 }
1710 1678
1711 1679
1712 ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() { 1680 ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
1713 if (spilled_) { 1681 if (spilled_) {
1714 resolver_->RestoreScratch(reg_); 1682 resolver_->RestoreScratch(reg_);
(...skipping 34 matching lines...)
1749 intptr_t num_args_tested) { 1717 intptr_t num_args_tested) {
1750 if ((deopt_id_to_ic_data_ != NULL) && 1718 if ((deopt_id_to_ic_data_ != NULL) &&
1751 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) { 1719 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
1752 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id]; 1720 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1753 ASSERT(res->deopt_id() == deopt_id); 1721 ASSERT(res->deopt_id() == deopt_id);
1754 ASSERT(res->target_name() == target_name.raw()); 1722 ASSERT(res->target_name() == target_name.raw());
1755 ASSERT(res->NumArgsTested() == num_args_tested); 1723 ASSERT(res->NumArgsTested() == num_args_tested);
1756 ASSERT(!res->is_static_call()); 1724 ASSERT(!res->is_static_call());
1757 return res; 1725 return res;
1758 } 1726 }
1759 const ICData& ic_data = ICData::ZoneHandle(zone(), ICData::New( 1727 const ICData& ic_data =
1760 parsed_function().function(), target_name, 1728 ICData::ZoneHandle(zone(), ICData::New(parsed_function().function(),
1761 arguments_descriptor, deopt_id, num_args_tested, false)); 1729 target_name, arguments_descriptor,
1730 deopt_id, num_args_tested, false));
1762 #if defined(TAG_IC_DATA) 1731 #if defined(TAG_IC_DATA)
1763 ic_data.set_tag(Instruction::kInstanceCall); 1732 ic_data.set_tag(Instruction::kInstanceCall);
1764 #endif 1733 #endif
1765 if (deopt_id_to_ic_data_ != NULL) { 1734 if (deopt_id_to_ic_data_ != NULL) {
1766 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data; 1735 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1767 } 1736 }
1768 ASSERT(!ic_data.is_static_call()); 1737 ASSERT(!ic_data.is_static_call());
1769 return &ic_data; 1738 return &ic_data;
1770 } 1739 }
1771 1740
1772 1741
1773 const ICData* FlowGraphCompiler::GetOrAddStaticCallICData( 1742 const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
1774 intptr_t deopt_id, 1743 intptr_t deopt_id,
1775 const Function& target, 1744 const Function& target,
1776 const Array& arguments_descriptor, 1745 const Array& arguments_descriptor,
1777 intptr_t num_args_tested) { 1746 intptr_t num_args_tested) {
1778 if ((deopt_id_to_ic_data_ != NULL) && 1747 if ((deopt_id_to_ic_data_ != NULL) &&
1779 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) { 1748 ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
1780 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id]; 1749 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1781 ASSERT(res->deopt_id() == deopt_id); 1750 ASSERT(res->deopt_id() == deopt_id);
1782 ASSERT(res->target_name() == target.name()); 1751 ASSERT(res->target_name() == target.name());
1783 ASSERT(res->NumArgsTested() == num_args_tested); 1752 ASSERT(res->NumArgsTested() == num_args_tested);
1784 ASSERT(res->is_static_call()); 1753 ASSERT(res->is_static_call());
1785 return res; 1754 return res;
1786 } 1755 }
1787 const ICData& ic_data = ICData::ZoneHandle(zone(), ICData::New( 1756 const ICData& ic_data = ICData::ZoneHandle(
1788 parsed_function().function(), String::Handle(zone(), target.name()), 1757 zone(),
1789 arguments_descriptor, deopt_id, num_args_tested, true)); 1758 ICData::New(parsed_function().function(),
1759 String::Handle(zone(), target.name()), arguments_descriptor,
1760 deopt_id, num_args_tested, true));
1790 ic_data.AddTarget(target); 1761 ic_data.AddTarget(target);
1791 #if defined(TAG_IC_DATA) 1762 #if defined(TAG_IC_DATA)
1792 ic_data.set_tag(Instruction::kStaticCall); 1763 ic_data.set_tag(Instruction::kStaticCall);
1793 #endif 1764 #endif
1794 if (deopt_id_to_ic_data_ != NULL) { 1765 if (deopt_id_to_ic_data_ != NULL) {
1795 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data; 1766 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1796 } 1767 }
1797 return &ic_data; 1768 return &ic_data;
1798 } 1769 }
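GetOrAddInstanceCallICData and GetOrAddStaticCallICData share a memoize-by-deopt-id pattern: consult a per-deopt-id table, validate a hit with assertions, otherwise create and record a fresh entry. A reduced standalone sketch (ICData is modeled here as a plain struct, an assumption for illustration):

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct ICDataStub {
      intptr_t deopt_id;
      std::string target_name;
      intptr_t num_args_tested;
      bool is_static_call;
    };

    const ICDataStub* GetOrAdd(std::unordered_map<intptr_t, ICDataStub>* cache,
                               intptr_t deopt_id,
                               const std::string& target_name,
                               intptr_t num_args_tested,
                               bool is_static_call) {
      auto it = cache->find(deopt_id);
      if (it != cache->end()) {
        // A cache hit must agree with the request in every field.
        assert(it->second.target_name == target_name);
        assert(it->second.num_args_tested == num_args_tested);
        assert(it->second.is_static_call == is_static_call);
        return &it->second;
      }
      auto inserted = cache->emplace(
          deopt_id,
          ICDataStub{deopt_id, target_name, num_args_tested, is_static_call});
      return &inserted.first->second;
    }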
1799 1770
1800 1771
1801 intptr_t FlowGraphCompiler::GetOptimizationThreshold() const { 1772 intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
1802 intptr_t threshold; 1773 intptr_t threshold;
1803 if (is_optimizing()) { 1774 if (is_optimizing()) {
1804 threshold = FLAG_reoptimization_counter_threshold; 1775 threshold = FLAG_reoptimization_counter_threshold;
1805 } else if (parsed_function_.function().IsIrregexpFunction()) { 1776 } else if (parsed_function_.function().IsIrregexpFunction()) {
1806 threshold = FLAG_regexp_optimization_counter_threshold; 1777 threshold = FLAG_regexp_optimization_counter_threshold;
1807 } else { 1778 } else {
1808 const intptr_t basic_blocks = flow_graph().preorder().length(); 1779 const intptr_t basic_blocks = flow_graph().preorder().length();
1809 ASSERT(basic_blocks > 0); 1780 ASSERT(basic_blocks > 0);
1810 threshold = FLAG_optimization_counter_scale * basic_blocks + 1781 threshold = FLAG_optimization_counter_scale * basic_blocks +
1811 FLAG_min_optimization_counter_threshold; 1782 FLAG_min_optimization_counter_threshold;
1812 if (threshold > FLAG_optimization_counter_threshold) { 1783 if (threshold > FLAG_optimization_counter_threshold) {
1813 threshold = FLAG_optimization_counter_threshold; 1784 threshold = FLAG_optimization_counter_threshold;
1814 } 1785 }
1815 } 1786 }
1816 return threshold; 1787 return threshold;
1817 } 1788 }
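GetOptimizationThreshold scales the invocation-count threshold by function size and clamps it at a cap. The formula as a pure function (standalone sketch; flag plumbing omitted and no defaults assumed):

    #include <algorithm>
    #include <cstdint>

    // threshold = min(scale * basic_blocks + minimum, cap): larger
    // functions must run more often before the VM optimizes them,
    // up to a global cap.
    intptr_t OptimizationThreshold(intptr_t basic_blocks,
                                   intptr_t scale,
                                   intptr_t minimum,
                                   intptr_t cap) {
      return std::min(scale * basic_blocks + minimum, cap);
    }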
1818 1789
1819 1790
1820 const Class& FlowGraphCompiler::BoxClassFor(Representation rep) { 1791 const Class& FlowGraphCompiler::BoxClassFor(Representation rep) {
1821 switch (rep) { 1792 switch (rep) {
(...skipping 11 matching lines...)
1833 UNREACHABLE(); 1804 UNREACHABLE();
1834 return Class::ZoneHandle(); 1805 return Class::ZoneHandle();
1835 } 1806 }
1836 } 1807 }
1837 1808
1838 1809
1839 RawArray* FlowGraphCompiler::InliningIdToFunction() const { 1810 RawArray* FlowGraphCompiler::InliningIdToFunction() const {
1840 if (inline_id_to_function_.length() == 0) { 1811 if (inline_id_to_function_.length() == 0) {
1841 return Object::empty_array().raw(); 1812 return Object::empty_array().raw();
1842 } 1813 }
1843 const Array& res = Array::Handle( 1814 const Array& res =
1844 Array::New(inline_id_to_function_.length(), Heap::kOld)); 1815 Array::Handle(Array::New(inline_id_to_function_.length(), Heap::kOld));
1845 for (intptr_t i = 0; i < inline_id_to_function_.length(); i++) { 1816 for (intptr_t i = 0; i < inline_id_to_function_.length(); i++) {
1846 res.SetAt(i, *inline_id_to_function_[i]); 1817 res.SetAt(i, *inline_id_to_function_[i]);
1847 } 1818 }
1848 return res.raw(); 1819 return res.raw();
1849 } 1820 }
1850 1821
1851 1822
1852 RawArray* FlowGraphCompiler::InliningIdToTokenPos() const { 1823 RawArray* FlowGraphCompiler::InliningIdToTokenPos() const {
1853 if (inline_id_to_token_pos_.length() == 0) { 1824 if (inline_id_to_token_pos_.length() == 0) {
1854 return Object::empty_array().raw(); 1825 return Object::empty_array().raw();
1855 } 1826 }
1856 const Array& res = Array::Handle(zone(), 1827 const Array& res = Array::Handle(
1857 Array::New(inline_id_to_token_pos_.length(), Heap::kOld)); 1828 zone(), Array::New(inline_id_to_token_pos_.length(), Heap::kOld));
1858 Smi& smi = Smi::Handle(zone()); 1829 Smi& smi = Smi::Handle(zone());
1859 for (intptr_t i = 0; i < inline_id_to_token_pos_.length(); i++) { 1830 for (intptr_t i = 0; i < inline_id_to_token_pos_.length(); i++) {
1860 smi = Smi::New(inline_id_to_token_pos_[i].value()); 1831 smi = Smi::New(inline_id_to_token_pos_[i].value());
1861 res.SetAt(i, smi); 1832 res.SetAt(i, smi);
1862 } 1833 }
1863 return res.raw(); 1834 return res.raw();
1864 } 1835 }
1865 1836
1866 1837
1867 RawArray* FlowGraphCompiler::CallerInliningIdMap() const { 1838 RawArray* FlowGraphCompiler::CallerInliningIdMap() const {
1868 if (caller_inline_id_.length() == 0) { 1839 if (caller_inline_id_.length() == 0) {
1869 return Object::empty_array().raw(); 1840 return Object::empty_array().raw();
1870 } 1841 }
1871 const Array& res = Array::Handle( 1842 const Array& res =
1872 Array::New(caller_inline_id_.length(), Heap::kOld)); 1843 Array::Handle(Array::New(caller_inline_id_.length(), Heap::kOld));
1873 Smi& smi = Smi::Handle(); 1844 Smi& smi = Smi::Handle();
1874 for (intptr_t i = 0; i < caller_inline_id_.length(); i++) { 1845 for (intptr_t i = 0; i < caller_inline_id_.length(); i++) {
1875 smi = Smi::New(caller_inline_id_[i]); 1846 smi = Smi::New(caller_inline_id_[i]);
1876 res.SetAt(i, smi); 1847 res.SetAt(i, smi);
1877 } 1848 }
1878 return res.raw(); 1849 return res.raw();
1879 } 1850 }
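The three methods above (InliningIdToFunction, InliningIdToTokenPos, CallerInliningIdMap) serialize parallel per-inline-id records into flat arrays indexed by inline id. A standalone sketch of the same shape, not VM code:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct InlineRecord {
      std::string function;  // stands in for the VM Function object
      intptr_t token_pos;    // source position of the inlined call
      intptr_t caller_id;    // inline id of the caller
    };

    // Splits records into three parallel arrays, all indexed by inline
    // id, mirroring the metadata the compiler attaches to a Code object.
    void Serialize(const std::vector<InlineRecord>& records,
                   std::vector<std::string>* functions,
                   std::vector<intptr_t>* token_positions,
                   std::vector<intptr_t>* caller_ids) {
      for (const InlineRecord& r : records) {
        functions->push_back(r.function);
        token_positions->push_back(r.token_pos);
        caller_ids->push_back(r.caller_id);
      }
    }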
1880 1851
1881 1852
1882 void FlowGraphCompiler::BeginCodeSourceRange() { 1853 void FlowGraphCompiler::BeginCodeSourceRange() {
1883 NOT_IN_PRODUCT( 1854 #if !defined(PRODUCT)
1884 // Remember how many bytes of code we emitted so far. This function 1855 // Remember how many bytes of code we emitted so far. This function
1885 // is called before we call into an instruction's EmitNativeCode. 1856 // is called before we call into an instruction's EmitNativeCode.
1886 saved_code_size_ = assembler()->CodeSize(); 1857 saved_code_size_ = assembler()->CodeSize();
1887 ); 1858 #endif // !defined(PRODUCT)
1888 } 1859 }
1889 1860
1890 1861
1891 bool FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) { 1862 bool FlowGraphCompiler::EndCodeSourceRange(TokenPosition token_pos) {
1892 NOT_IN_PRODUCT( 1863 #if !defined(PRODUCT)
1893 // This function is called after each instruction's EmitNativeCode. 1864 // This function is called after each instruction's EmitNativeCode.
1894 if (saved_code_size_ < assembler()->CodeSize()) { 1865 if (saved_code_size_ < assembler()->CodeSize()) {
1895 // We emitted more code; now associate the emitted code chunk with 1866 // We emitted more code; now associate the emitted code chunk with
1896 // |token_pos|. 1867 // |token_pos|.
1897 code_source_map_builder()->AddEntry(saved_code_size_, token_pos); 1868 code_source_map_builder()->AddEntry(saved_code_size_, token_pos);
1898 BeginCodeSourceRange(); 1869 BeginCodeSourceRange();
1899 return true; 1870 return true;
1900 } 1871 }
1901 ); 1872 #endif // !defined(PRODUCT)
1902 return false; 1873 return false;
1903 } 1874 }
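Begin/EndCodeSourceRange above bracket each instruction's code emission: snapshot the code size before, then afterwards map any newly emitted bytes to the instruction's token position. A standalone sketch of that bookkeeping (not VM code; the map builder is modeled as a vector of pairs):

    #include <cstdint>
    #include <utility>
    #include <vector>

    class CodeSourceRanges {
     public:
      void Begin(intptr_t code_size) { saved_code_size_ = code_size; }

      // Returns true if any code was emitted since Begin(); records the
      // range and immediately starts the next one where this one ended.
      bool End(intptr_t code_size, intptr_t token_pos) {
        if (saved_code_size_ < code_size) {
          entries_.push_back({saved_code_size_, token_pos});
          Begin(code_size);
          return true;
        }
        return false;
      }

     private:
      intptr_t saved_code_size_ = 0;
      std::vector<std::pair<intptr_t, intptr_t>> entries_;
    };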
1904 1875
1905 1876
1906 #if !defined(TARGET_ARCH_DBC) 1877 #if !defined(TARGET_ARCH_DBC)
1907 // DBC emits calls very differently from other architectures due to its 1878 // DBC emits calls very differently from other architectures due to its
1908 // interpreted nature. 1879 // interpreted nature.
1909 void FlowGraphCompiler::EmitPolymorphicInstanceCall( 1880 void FlowGraphCompiler::EmitPolymorphicInstanceCall(const ICData& ic_data,
1910 const ICData& ic_data, 1881 intptr_t argument_count,
1911 intptr_t argument_count, 1882 const Array& argument_names,
1912 const Array& argument_names, 1883 intptr_t deopt_id,
1913 intptr_t deopt_id, 1884 TokenPosition token_pos,
1914 TokenPosition token_pos, 1885 LocationSummary* locs,
1915 LocationSummary* locs, 1886 bool complete) {
1916 bool complete) {
1917 if (FLAG_polymorphic_with_deopt) { 1887 if (FLAG_polymorphic_with_deopt) {
1918 Label* deopt = AddDeoptStub(deopt_id, 1888 Label* deopt =
1919 ICData::kDeoptPolymorphicInstanceCallTestFail); 1889 AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
1920 Label ok; 1890 Label ok;
1921 EmitTestAndCall(ic_data, argument_count, argument_names, 1891 EmitTestAndCall(ic_data, argument_count, argument_names,
1922 deopt, // No cid match. 1892 deopt, // No cid match.
1923 &ok, // Found cid. 1893 &ok, // Found cid.
1924 deopt_id, token_pos, locs, complete); 1894 deopt_id, token_pos, locs, complete);
1925 assembler()->Bind(&ok); 1895 assembler()->Bind(&ok);
1926 } else { 1896 } else {
1927 if (complete) { 1897 if (complete) {
1928 Label ok; 1898 Label ok;
1929 EmitTestAndCall(ic_data, argument_count, argument_names, 1899 EmitTestAndCall(ic_data, argument_count, argument_names,
1930 NULL, // No cid match. 1900 NULL, // No cid match.
1931 &ok, // Found cid. 1901 &ok, // Found cid.
1932 deopt_id, token_pos, locs, true); 1902 deopt_id, token_pos, locs, true);
1933 assembler()->Bind(&ok); 1903 assembler()->Bind(&ok);
1934 } else { 1904 } else {
1935 EmitSwitchableInstanceCall(ic_data, argument_count, 1905 EmitSwitchableInstanceCall(ic_data, argument_count, deopt_id, token_pos,
1936 deopt_id, token_pos, locs); 1906 locs);
1937 } 1907 }
1938 } 1908 }
1939 } 1909 }
1940 #endif 1910 #endif
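EmitPolymorphicInstanceCall above chooses among three call shapes; reduced to a pure decision function (standalone sketch, enumerator names descriptive rather than the VM's):

    enum class PolymorphicCallStrategy {
      kTestAndCallWithDeopt,  // cid miss jumps to a deopt stub
      kTestAndCallComplete,   // cid set is complete, so no miss path
      kSwitchable             // fall back to a switchable instance call
    };

    PolymorphicCallStrategy SelectPolymorphicStrategy(
        bool polymorphic_with_deopt, bool complete) {
      if (polymorphic_with_deopt) {
        return PolymorphicCallStrategy::kTestAndCallWithDeopt;
      }
      return complete ? PolymorphicCallStrategy::kTestAndCallComplete
                      : PolymorphicCallStrategy::kSwitchable;
    }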
1941 1911
1942 #if defined(DEBUG) && !defined(TARGET_ARCH_DBC) 1912 #if defined(DEBUG) && !defined(TARGET_ARCH_DBC)
1943 // TODO(vegorov) re-enable frame state tracking on DBC. It is 1913 // TODO(vegorov) re-enable frame state tracking on DBC. It is
1944 // currently disabled because it relies on LocationSummaries and 1914 // currently disabled because it relies on LocationSummaries and
1945 // we don't use them during unoptimized compilation on DBC. 1915 // we don't use them during unoptimized compilation on DBC.
1946 void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) { 1916 void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
1947 ASSERT(!is_optimizing()); 1917 ASSERT(!is_optimizing());
1948 1918
1949 switch (instr->tag()) { 1919 switch (instr->tag()) {
1950 case Instruction::kPushArgument: 1920 case Instruction::kPushArgument:
1951 // Do nothing. 1921 // Do nothing.
1952 break; 1922 break;
1953 1923
1954 case Instruction::kDropTemps: 1924 case Instruction::kDropTemps:
1955 FrameStatePop(instr->locs()->input_count() + 1925 FrameStatePop(instr->locs()->input_count() +
1956 instr->AsDropTemps()->num_temps()); 1926 instr->AsDropTemps()->num_temps());
1957 break; 1927 break;
1958 1928
1959 default: 1929 default:
1960 FrameStatePop(instr->locs()->input_count()); 1930 FrameStatePop(instr->locs()->input_count());
1961 break; 1931 break;
1962 } 1932 }
1963 1933
1964 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall()); 1934 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall());
1965 1935
1966 FrameStatePop(instr->ArgumentCount()); 1936 FrameStatePop(instr->ArgumentCount());
1967 Definition* defn = instr->AsDefinition(); 1937 Definition* defn = instr->AsDefinition();
1968 if ((defn != NULL) && defn->HasTemp()) { 1938 if ((defn != NULL) && defn->HasTemp()) {
1969 FrameStatePush(defn); 1939 FrameStatePush(defn);
1970 } 1940 }
1971 } 1941 }
1972 1942
1973 1943
1974 void FlowGraphCompiler::FrameStatePush(Definition* defn) { 1944 void FlowGraphCompiler::FrameStatePush(Definition* defn) {
1975 Representation rep = defn->representation(); 1945 Representation rep = defn->representation();
1976 if ((rep == kUnboxedDouble) || 1946 if ((rep == kUnboxedDouble) || (rep == kUnboxedFloat64x2) ||
1977 (rep == kUnboxedFloat64x2) ||
1978 (rep == kUnboxedFloat32x4)) { 1947 (rep == kUnboxedFloat32x4)) {
1979 // LoadField instruction lies about its representation in the unoptimized 1948 // LoadField instruction lies about its representation in the unoptimized
1980 // code because Definition::representation() can't depend on the type of 1949 // code because Definition::representation() can't depend on the type of
1981 // compilation but MakeLocationSummary and EmitNativeCode can. 1950 // compilation but MakeLocationSummary and EmitNativeCode can.
1982 ASSERT(defn->IsLoadField() && defn->AsLoadField()->IsUnboxedLoad()); 1951 ASSERT(defn->IsLoadField() && defn->AsLoadField()->IsUnboxedLoad());
1983 ASSERT(defn->locs()->out(0).IsRegister()); 1952 ASSERT(defn->locs()->out(0).IsRegister());
1984 rep = kTagged; 1953 rep = kTagged;
1985 } 1954 }
1986 ASSERT(!is_optimizing()); 1955 ASSERT(!is_optimizing());
1987 ASSERT((rep == kTagged) || (rep == kUntagged)); 1956 ASSERT((rep == kTagged) || (rep == kUntagged));
1988 ASSERT(rep != kUntagged || flow_graph_.IsIrregexpFunction()); 1957 ASSERT(rep != kUntagged || flow_graph_.IsIrregexpFunction());
1989 frame_state_.Add(rep); 1958 frame_state_.Add(rep);
1990 } 1959 }
1991 1960
1992 1961
1993 void FlowGraphCompiler::FrameStatePop(intptr_t count) { 1962 void FlowGraphCompiler::FrameStatePop(intptr_t count) {
1994 ASSERT(!is_optimizing()); 1963 ASSERT(!is_optimizing());
1995 frame_state_.TruncateTo(Utils::Maximum(static_cast<intptr_t>(0), 1964 frame_state_.TruncateTo(
1996 frame_state_.length() - count)); 1965 Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));
1997 } 1966 }
1998 1967
1999 1968
2000 bool FlowGraphCompiler::FrameStateIsSafeToCall() { 1969 bool FlowGraphCompiler::FrameStateIsSafeToCall() {
2001 ASSERT(!is_optimizing()); 1970 ASSERT(!is_optimizing());
2002 for (intptr_t i = 0; i < frame_state_.length(); i++) { 1971 for (intptr_t i = 0; i < frame_state_.length(); i++) {
2003 if (frame_state_[i] != kTagged) { 1972 if (frame_state_[i] != kTagged) {
2004 return false; 1973 return false;
2005 } 1974 }
2006 } 1975 }
2007 return true; 1976 return true;
2008 } 1977 }
2009 1978
2010 1979
2011 void FlowGraphCompiler::FrameStateClear() { 1980 void FlowGraphCompiler::FrameStateClear() {
2012 ASSERT(!is_optimizing()); 1981 ASSERT(!is_optimizing());
2013 frame_state_.TruncateTo(0); 1982 frame_state_.TruncateTo(0);
2014 } 1983 }
2015 #endif // defined(DEBUG) && !defined(TARGET_ARCH_DBC) 1984 #endif // defined(DEBUG) && !defined(TARGET_ARCH_DBC)
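The DEBUG-only frame-state machinery above simulates the unoptimized expression stack as a list of value representations, so the compiler can assert that every slot is a tagged pointer before any instruction that can call. A standalone sketch of that model, not VM code:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    enum Representation { kTagged, kUntagged, kUnboxedDouble };

    class FrameState {
     public:
      void Push(Representation rep) { stack_.push_back(rep); }

      // Pops |count| slots, clamping at empty as FrameStatePop does.
      void Pop(intptr_t count) {
        const intptr_t new_len = std::max<intptr_t>(
            0, static_cast<intptr_t>(stack_.size()) - count);
        stack_.resize(new_len);
      }

      // Mirrors FrameStateIsSafeToCall: every simulated slot must be
      // kTagged before a call site.
      bool IsSafeToCall() const {
        for (Representation rep : stack_) {
          if (rep != kTagged) return false;
        }
        return true;
      }

      void Clear() { stack_.clear(); }

     private:
      std::vector<Representation> stack_;
    };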
2016 1985
2017 1986
2018 } // namespace dart 1987 } // namespace dart