OLD | NEW |
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/flow_graph.h" | 5 #include "vm/flow_graph.h" |
6 | 6 |
7 #include "vm/bit_vector.h" | 7 #include "vm/bit_vector.h" |
8 #include "vm/cha.h" | 8 #include "vm/cha.h" |
9 #include "vm/flow_graph_builder.h" | 9 #include "vm/flow_graph_builder.h" |
10 #include "vm/flow_graph_compiler.h" | 10 #include "vm/flow_graph_compiler.h" |
11 #include "vm/flow_graph_range_analysis.h" | 11 #include "vm/flow_graph_range_analysis.h" |
12 #include "vm/il_printer.h" | 12 #include "vm/il_printer.h" |
13 #include "vm/intermediate_language.h" | 13 #include "vm/intermediate_language.h" |
14 #include "vm/growable_array.h" | 14 #include "vm/growable_array.h" |
15 #include "vm/object_store.h" | 15 #include "vm/object_store.h" |
16 | 16 |
17 namespace dart { | 17 namespace dart { |
18 | 18 |
19 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | 19 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) |
20 DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass."); | 20 DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass."); |
21 #endif | 21 #endif |
22 DEFINE_FLAG(bool, prune_dead_locals, true, "optimize dead locals away"); | 22 DEFINE_FLAG(bool, prune_dead_locals, true, "optimize dead locals away"); |
23 DECLARE_FLAG(bool, verify_compiler); | 23 DECLARE_FLAG(bool, verify_compiler); |
24 | 24 |
25 | 25 |
26 FlowGraph::FlowGraph(const ParsedFunction& parsed_function, | 26 FlowGraph::FlowGraph(const ParsedFunction& parsed_function, |
27 GraphEntryInstr* graph_entry, | 27 GraphEntryInstr* graph_entry, |
28 intptr_t max_block_id) | 28 intptr_t max_block_id) |
29 : thread_(Thread::Current()), | 29 : thread_(Thread::Current()), |
30 parent_(), | 30 parent_(), |
31 current_ssa_temp_index_(0), | 31 current_ssa_temp_index_(0), |
32 max_block_id_(max_block_id), | 32 max_block_id_(max_block_id), |
33 parsed_function_(parsed_function), | 33 parsed_function_(parsed_function), |
34 num_copied_params_(parsed_function.num_copied_params()), | 34 num_copied_params_(parsed_function.num_copied_params()), |
35 num_non_copied_params_(parsed_function.num_non_copied_params()), | 35 num_non_copied_params_(parsed_function.num_non_copied_params()), |
36 graph_entry_(graph_entry), | 36 graph_entry_(graph_entry), |
37 preorder_(), | 37 preorder_(), |
38 postorder_(), | 38 postorder_(), |
39 reverse_postorder_(), | 39 reverse_postorder_(), |
40 optimized_block_order_(), | 40 optimized_block_order_(), |
41 constant_null_(NULL), | 41 constant_null_(NULL), |
42 constant_dead_(NULL), | 42 constant_dead_(NULL), |
43 constant_empty_context_(NULL), | 43 constant_empty_context_(NULL), |
44 block_effects_(NULL), | 44 block_effects_(NULL), |
45 licm_allowed_(true), | 45 licm_allowed_(true), |
46 loop_headers_(NULL), | 46 loop_headers_(NULL), |
47 loop_invariant_loads_(NULL), | 47 loop_invariant_loads_(NULL), |
48 deferred_prefixes_(parsed_function.deferred_prefixes()), | 48 deferred_prefixes_(parsed_function.deferred_prefixes()), |
49 captured_parameters_(new(zone()) BitVector(zone(), variable_count())), | 49 captured_parameters_(new (zone()) BitVector(zone(), variable_count())), |
50 inlining_id_(-1) { | 50 inlining_id_(-1) { |
51 DiscoverBlocks(); | 51 DiscoverBlocks(); |
52 } | 52 } |
53 | 53 |
54 | 54 |
55 void FlowGraph::EnsureSSATempIndex(Definition* defn, | 55 void FlowGraph::EnsureSSATempIndex(Definition* defn, Definition* replacement) { |
56 Definition* replacement) { | 56 if ((replacement->ssa_temp_index() == -1) && (defn->ssa_temp_index() != -1)) { |
57 if ((replacement->ssa_temp_index() == -1) && | |
58 (defn->ssa_temp_index() != -1)) { | |
59 AllocateSSAIndexes(replacement); | 57 AllocateSSAIndexes(replacement); |
60 } | 58 } |
61 } | 59 } |
62 | 60 |
63 | 61 |
64 void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator, | 62 void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator, |
65 Instruction* current, | 63 Instruction* current, |
66 Instruction* replacement) { | 64 Instruction* replacement) { |
67 Definition* current_defn = current->AsDefinition(); | 65 Definition* current_defn = current->AsDefinition(); |
68 if ((replacement != NULL) && (current_defn != NULL)) { | 66 if ((replacement != NULL) && (current_defn != NULL)) { |
(...skipping 24 matching lines...) Expand all Loading... |
93 } | 91 } |
94 } | 92 } |
95 iterator->RemoveCurrentFromGraph(); | 93 iterator->RemoveCurrentFromGraph(); |
96 } | 94 } |
97 | 95 |
98 | 96 |
99 void FlowGraph::AddToDeferredPrefixes( | 97 void FlowGraph::AddToDeferredPrefixes( |
100 ZoneGrowableArray<const LibraryPrefix*>* from) { | 98 ZoneGrowableArray<const LibraryPrefix*>* from) { |
101 ZoneGrowableArray<const LibraryPrefix*>* to = deferred_prefixes(); | 99 ZoneGrowableArray<const LibraryPrefix*>* to = deferred_prefixes(); |
102 for (intptr_t i = 0; i < from->length(); i++) { | 100 for (intptr_t i = 0; i < from->length(); i++) { |
103 const LibraryPrefix* prefix = (*from)[i]; | 101 const LibraryPrefix* prefix = (*from)[i]; |
104 for (intptr_t j = 0; j < to->length(); j++) { | 102 for (intptr_t j = 0; j < to->length(); j++) { |
105 if ((*to)[j]->raw() == prefix->raw()) { | 103 if ((*to)[j]->raw() == prefix->raw()) { |
106 return; | 104 return; |
107 } | 105 } |
108 } | 106 } |
109 to->Add(prefix); | 107 to->Add(prefix); |
110 } | 108 } |
111 } | 109 } |
112 | 110 |
113 | 111 |
114 bool FlowGraph::ShouldReorderBlocks(const Function& function, | 112 bool FlowGraph::ShouldReorderBlocks(const Function& function, |
115 bool is_optimized) { | 113 bool is_optimized) { |
116 return is_optimized | 114 return is_optimized && FLAG_reorder_basic_blocks && !function.is_intrinsic(); |
117 && FLAG_reorder_basic_blocks | |
118 && !function.is_intrinsic(); | |
119 } | 115 } |
120 | 116 |
121 | 117 |
122 GrowableArray<BlockEntryInstr*>* FlowGraph::CodegenBlockOrder( | 118 GrowableArray<BlockEntryInstr*>* FlowGraph::CodegenBlockOrder( |
123 bool is_optimized) { | 119 bool is_optimized) { |
124 return ShouldReorderBlocks(function(), is_optimized) | 120 return ShouldReorderBlocks(function(), is_optimized) ? &optimized_block_order_ |
125 ? &optimized_block_order_ | 121 : &reverse_postorder_; |
126 : &reverse_postorder_; | |
127 } | 122 } |
128 | 123 |
129 | 124 |
130 ConstantInstr* FlowGraph::GetConstant(const Object& object) { | 125 ConstantInstr* FlowGraph::GetConstant(const Object& object) { |
131 ConstantInstr* constant = constant_instr_pool_.LookupValue(object); | 126 ConstantInstr* constant = constant_instr_pool_.LookupValue(object); |
132 if (constant == NULL) { | 127 if (constant == NULL) { |
133 // Otherwise, allocate and add it to the pool. | 128 // Otherwise, allocate and add it to the pool. |
134 constant = new(zone()) ConstantInstr( | 129 constant = |
135 Object::ZoneHandle(zone(), object.raw())); | 130 new (zone()) ConstantInstr(Object::ZoneHandle(zone(), object.raw())); |
136 constant->set_ssa_temp_index(alloc_ssa_temp_index()); | 131 constant->set_ssa_temp_index(alloc_ssa_temp_index()); |
137 | 132 |
138 AddToInitialDefinitions(constant); | 133 AddToInitialDefinitions(constant); |
139 constant_instr_pool_.Insert(constant); | 134 constant_instr_pool_.Insert(constant); |
140 } | 135 } |
141 return constant; | 136 return constant; |
142 } | 137 } |
143 | 138 |
144 | 139 |
145 void FlowGraph::AddToInitialDefinitions(Definition* defn) { | 140 void FlowGraph::AddToInitialDefinitions(Definition* defn) { |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
188 } | 183 } |
189 return prev->AppendInstruction(instr); | 184 return prev->AppendInstruction(instr); |
190 } | 185 } |
191 | 186 |
192 | 187 |
193 // A wrapper around block entries including an index of the next successor to | 188 // A wrapper around block entries including an index of the next successor to |
194 // be read. | 189 // be read. |
195 class BlockTraversalState { | 190 class BlockTraversalState { |
196 public: | 191 public: |
197 explicit BlockTraversalState(BlockEntryInstr* block) | 192 explicit BlockTraversalState(BlockEntryInstr* block) |
198 : block_(block), | 193 : block_(block), |
199 next_successor_ix_(block->last_instruction()->SuccessorCount() - 1) { } | 194 next_successor_ix_(block->last_instruction()->SuccessorCount() - 1) {} |
200 | 195 |
201 bool HasNextSuccessor() const { return next_successor_ix_ >= 0; } | 196 bool HasNextSuccessor() const { return next_successor_ix_ >= 0; } |
202 BlockEntryInstr* NextSuccessor() { | 197 BlockEntryInstr* NextSuccessor() { |
203 ASSERT(HasNextSuccessor()); | 198 ASSERT(HasNextSuccessor()); |
204 return block_->last_instruction()->SuccessorAt(next_successor_ix_--); | 199 return block_->last_instruction()->SuccessorAt(next_successor_ix_--); |
205 } | 200 } |
206 | 201 |
207 BlockEntryInstr* block() const { return block_; } | 202 BlockEntryInstr* block() const { return block_; } |
208 | 203 |
209 private: | 204 private: |
(...skipping 10 matching lines...) Expand all Loading... |
220 // Initialize state. | 215 // Initialize state. |
221 preorder_.Clear(); | 216 preorder_.Clear(); |
222 postorder_.Clear(); | 217 postorder_.Clear(); |
223 reverse_postorder_.Clear(); | 218 reverse_postorder_.Clear(); |
224 parent_.Clear(); | 219 parent_.Clear(); |
225 | 220 |
226 GrowableArray<BlockTraversalState> block_stack; | 221 GrowableArray<BlockTraversalState> block_stack; |
227 graph_entry_->DiscoverBlock(NULL, &preorder_, &parent_); | 222 graph_entry_->DiscoverBlock(NULL, &preorder_, &parent_); |
228 block_stack.Add(BlockTraversalState(graph_entry_)); | 223 block_stack.Add(BlockTraversalState(graph_entry_)); |
229 while (!block_stack.is_empty()) { | 224 while (!block_stack.is_empty()) { |
230 BlockTraversalState &state = block_stack.Last(); | 225 BlockTraversalState& state = block_stack.Last(); |
231 BlockEntryInstr* block = state.block(); | 226 BlockEntryInstr* block = state.block(); |
232 if (state.HasNextSuccessor()) { | 227 if (state.HasNextSuccessor()) { |
233 // Process successors one-by-one. | 228 // Process successors one-by-one. |
234 BlockEntryInstr* succ = state.NextSuccessor(); | 229 BlockEntryInstr* succ = state.NextSuccessor(); |
235 if (succ->DiscoverBlock(block, &preorder_, &parent_)) { | 230 if (succ->DiscoverBlock(block, &preorder_, &parent_)) { |
236 block_stack.Add(BlockTraversalState(succ)); | 231 block_stack.Add(BlockTraversalState(succ)); |
237 } | 232 } |
238 } else { | 233 } else { |
239 // All successors have been processed, pop the current block entry node | 234 // All successors have been processed, pop the current block entry node |
240 // and add it to the postorder list. | 235 // and add it to the postorder list. |
(...skipping 13 matching lines...) Expand all Loading... |
254 | 249 |
255 // Block effects are using postorder numbering. Discard computed information. | 250 // Block effects are using postorder numbering. Discard computed information. |
256 block_effects_ = NULL; | 251 block_effects_ = NULL; |
257 loop_headers_ = NULL; | 252 loop_headers_ = NULL; |
258 loop_invariant_loads_ = NULL; | 253 loop_invariant_loads_ = NULL; |
259 } | 254 } |
260 | 255 |
261 | 256 |
262 void FlowGraph::MergeBlocks() { | 257 void FlowGraph::MergeBlocks() { |
263 bool changed = false; | 258 bool changed = false; |
264 BitVector* merged = new(zone()) BitVector(zone(), postorder().length()); | 259 BitVector* merged = new (zone()) BitVector(zone(), postorder().length()); |
265 for (BlockIterator block_it = reverse_postorder_iterator(); | 260 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
266 !block_it.Done(); | |
267 block_it.Advance()) { | 261 block_it.Advance()) { |
268 BlockEntryInstr* block = block_it.Current(); | 262 BlockEntryInstr* block = block_it.Current(); |
269 if (block->IsGraphEntry()) continue; | 263 if (block->IsGraphEntry()) continue; |
270 if (merged->Contains(block->postorder_number())) continue; | 264 if (merged->Contains(block->postorder_number())) continue; |
271 | 265 |
272 Instruction* last = block->last_instruction(); | 266 Instruction* last = block->last_instruction(); |
273 BlockEntryInstr* successor = NULL; | 267 BlockEntryInstr* successor = NULL; |
274 while ((!last->IsIndirectGoto()) && | 268 while ((!last->IsIndirectGoto()) && (last->SuccessorCount() == 1) && |
275 (last->SuccessorCount() == 1) && | |
276 (!last->SuccessorAt(0)->IsIndirectEntry()) && | 269 (!last->SuccessorAt(0)->IsIndirectEntry()) && |
277 (last->SuccessorAt(0)->PredecessorCount() == 1) && | 270 (last->SuccessorAt(0)->PredecessorCount() == 1) && |
278 (block->try_index() == last->SuccessorAt(0)->try_index())) { | 271 (block->try_index() == last->SuccessorAt(0)->try_index())) { |
279 successor = last->SuccessorAt(0); | 272 successor = last->SuccessorAt(0); |
280 ASSERT(last->IsGoto()); | 273 ASSERT(last->IsGoto()); |
281 | 274 |
282 // Remove environment uses and unlink goto and block entry. | 275 // Remove environment uses and unlink goto and block entry. |
283 successor->UnuseAllInputs(); | 276 successor->UnuseAllInputs(); |
284 last->previous()->LinkTo(successor->next()); | 277 last->previous()->LinkTo(successor->next()); |
285 last->UnuseAllInputs(); | 278 last->UnuseAllInputs(); |
286 | 279 |
287 last = successor->last_instruction(); | 280 last = successor->last_instruction(); |
288 merged->Add(successor->postorder_number()); | 281 merged->Add(successor->postorder_number()); |
289 changed = true; | 282 changed = true; |
290 if (FLAG_trace_optimization) { | 283 if (FLAG_trace_optimization) { |
291 OS::Print("Merged blocks B%" Pd " and B%" Pd "\n", | 284 OS::Print("Merged blocks B%" Pd " and B%" Pd "\n", block->block_id(), |
292 block->block_id(), | |
293 successor->block_id()); | 285 successor->block_id()); |
294 } | 286 } |
295 } | 287 } |
296 // The new block inherits the block id of the last successor to maintain | 288 // The new block inherits the block id of the last successor to maintain |
297 // the order of phi inputs at its successors consistent with block ids. | 289 // the order of phi inputs at its successors consistent with block ids. |
298 if (successor != NULL) { | 290 if (successor != NULL) { |
299 block->set_block_id(successor->block_id()); | 291 block->set_block_id(successor->block_id()); |
300 } | 292 } |
301 } | 293 } |
302 // Recompute block order after changes were made. | 294 // Recompute block order after changes were made. |
(...skipping 11 matching lines...) Expand all Loading... |
314 return count; | 306 return count; |
315 } | 307 } |
316 | 308 |
317 | 309 |
318 static void VerifyUseListsInInstruction(Instruction* instr) { | 310 static void VerifyUseListsInInstruction(Instruction* instr) { |
319 ASSERT(instr != NULL); | 311 ASSERT(instr != NULL); |
320 ASSERT(!instr->IsJoinEntry()); | 312 ASSERT(!instr->IsJoinEntry()); |
321 for (intptr_t i = 0; i < instr->InputCount(); ++i) { | 313 for (intptr_t i = 0; i < instr->InputCount(); ++i) { |
322 Value* use = instr->InputAt(i); | 314 Value* use = instr->InputAt(i); |
323 ASSERT(use->definition() != NULL); | 315 ASSERT(use->definition() != NULL); |
324 ASSERT((use->definition() != instr) || | 316 ASSERT((use->definition() != instr) || use->definition()->IsPhi() || |
325 use->definition()->IsPhi() || | |
326 use->definition()->IsMaterializeObject()); | 317 use->definition()->IsMaterializeObject()); |
327 ASSERT(use->instruction() == instr); | 318 ASSERT(use->instruction() == instr); |
328 ASSERT(use->use_index() == i); | 319 ASSERT(use->use_index() == i); |
329 ASSERT(!FLAG_verify_compiler || | 320 ASSERT(!FLAG_verify_compiler || |
330 (1 == MembershipCount(use, use->definition()->input_use_list()))); | 321 (1 == MembershipCount(use, use->definition()->input_use_list()))); |
331 } | 322 } |
332 if (instr->env() != NULL) { | 323 if (instr->env() != NULL) { |
333 intptr_t use_index = 0; | 324 intptr_t use_index = 0; |
334 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) { | 325 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) { |
335 Value* use = it.CurrentValue(); | 326 Value* use = it.CurrentValue(); |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
374 ASSERT(instr->IsBlockEntry() || | 365 ASSERT(instr->IsBlockEntry() || |
375 (instr->IsPhi() && instr->AsPhi()->is_alive()) || | 366 (instr->IsPhi() && instr->AsPhi()->is_alive()) || |
376 (instr->previous() != NULL)); | 367 (instr->previous() != NULL)); |
377 prev = curr; | 368 prev = curr; |
378 curr = curr->next_use(); | 369 curr = curr->next_use(); |
379 } | 370 } |
380 } | 371 } |
381 } | 372 } |
382 | 373 |
383 void FlowGraph::ComputeIsReceiverRecursive( | 374 void FlowGraph::ComputeIsReceiverRecursive( |
384 PhiInstr* phi, GrowableArray<PhiInstr*>* unmark) const { | 375 PhiInstr* phi, |
| 376 GrowableArray<PhiInstr*>* unmark) const { |
385 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) return; | 377 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) return; |
386 phi->set_is_receiver(PhiInstr::kReceiver); | 378 phi->set_is_receiver(PhiInstr::kReceiver); |
387 for (intptr_t i = 0; i < phi->InputCount(); ++i) { | 379 for (intptr_t i = 0; i < phi->InputCount(); ++i) { |
388 Definition* def = phi->InputAt(i)->definition(); | 380 Definition* def = phi->InputAt(i)->definition(); |
389 if (def->IsParameter() && (def->AsParameter()->index() == 0)) continue; | 381 if (def->IsParameter() && (def->AsParameter()->index() == 0)) continue; |
390 if (!def->IsPhi()) { | 382 if (!def->IsPhi()) { |
391 phi->set_is_receiver(PhiInstr::kNotReceiver); | 383 phi->set_is_receiver(PhiInstr::kNotReceiver); |
392 break; | 384 break; |
393 } | 385 } |
394 ComputeIsReceiverRecursive(def->AsPhi(), unmark); | 386 ComputeIsReceiverRecursive(def->AsPhi(), unmark); |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
441 bool FlowGraph::InstanceCallNeedsClassCheck(InstanceCallInstr* call, | 433 bool FlowGraph::InstanceCallNeedsClassCheck(InstanceCallInstr* call, |
442 RawFunction::Kind kind) const { | 434 RawFunction::Kind kind) const { |
443 if (!FLAG_use_cha_deopt && !isolate()->all_classes_finalized()) { | 435 if (!FLAG_use_cha_deopt && !isolate()->all_classes_finalized()) { |
444 // Even if class or function are private, lazy class finalization | 436 // Even if class or function are private, lazy class finalization |
445 // may later add overriding methods. | 437 // may later add overriding methods. |
446 return true; | 438 return true; |
447 } | 439 } |
448 Definition* callee_receiver = call->ArgumentAt(0); | 440 Definition* callee_receiver = call->ArgumentAt(0); |
449 ASSERT(callee_receiver != NULL); | 441 ASSERT(callee_receiver != NULL); |
450 if (function().IsDynamicFunction() && IsReceiver(callee_receiver)) { | 442 if (function().IsDynamicFunction() && IsReceiver(callee_receiver)) { |
451 const String& name = (kind == RawFunction::kMethodExtractor) | 443 const String& name = |
452 ? String::Handle(zone(), Field::NameFromGetter(call->function_name())) | 444 (kind == RawFunction::kMethodExtractor) |
453 : call->function_name(); | 445 ? String::Handle(zone(), |
| 446 Field::NameFromGetter(call->function_name())) |
| 447 : call->function_name(); |
454 const Class& cls = Class::Handle(zone(), function().Owner()); | 448 const Class& cls = Class::Handle(zone(), function().Owner()); |
455 intptr_t subclass_count = 0; | 449 intptr_t subclass_count = 0; |
456 if (!thread()->cha()->HasOverride(cls, name, &subclass_count)) { | 450 if (!thread()->cha()->HasOverride(cls, name, &subclass_count)) { |
457 if (FLAG_trace_cha) { | 451 if (FLAG_trace_cha) { |
458 THR_Print(" **(CHA) Instance call needs no check, " | 452 THR_Print( |
| 453 " **(CHA) Instance call needs no check, " |
459 "no overrides of '%s' '%s'\n", | 454 "no overrides of '%s' '%s'\n", |
460 name.ToCString(), cls.ToCString()); | 455 name.ToCString(), cls.ToCString()); |
461 } | 456 } |
462 thread()->cha()->AddToGuardedClasses(cls, subclass_count); | 457 thread()->cha()->AddToGuardedClasses(cls, subclass_count); |
463 return false; | 458 return false; |
464 } | 459 } |
465 } | 460 } |
466 return true; | 461 return true; |
467 } | 462 } |
468 | 463 |
469 | 464 |
470 | |
471 bool FlowGraph::VerifyUseLists() { | 465 bool FlowGraph::VerifyUseLists() { |
472 // Verify the initial definitions. | 466 // Verify the initial definitions. |
473 for (intptr_t i = 0; i < graph_entry_->initial_definitions()->length(); ++i) { | 467 for (intptr_t i = 0; i < graph_entry_->initial_definitions()->length(); ++i) { |
474 VerifyUseListsInInstruction((*graph_entry_->initial_definitions())[i]); | 468 VerifyUseListsInInstruction((*graph_entry_->initial_definitions())[i]); |
475 } | 469 } |
476 | 470 |
477 // Verify phis in join entries and the instructions in each block. | 471 // Verify phis in join entries and the instructions in each block. |
478 for (intptr_t i = 0; i < preorder_.length(); ++i) { | 472 for (intptr_t i = 0; i < preorder_.length(); ++i) { |
479 BlockEntryInstr* entry = preorder_[i]; | 473 BlockEntryInstr* entry = preorder_[i]; |
480 JoinEntryInstr* join = entry->AsJoinEntry(); | 474 JoinEntryInstr* join = entry->AsJoinEntry(); |
481 if (join != NULL) { | 475 if (join != NULL) { |
482 for (PhiIterator it(join); !it.Done(); it.Advance()) { | 476 for (PhiIterator it(join); !it.Done(); it.Advance()) { |
483 PhiInstr* phi = it.Current(); | 477 PhiInstr* phi = it.Current(); |
484 ASSERT(phi != NULL); | 478 ASSERT(phi != NULL); |
485 VerifyUseListsInInstruction(phi); | 479 VerifyUseListsInInstruction(phi); |
486 } | 480 } |
487 } | 481 } |
488 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 482 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
489 VerifyUseListsInInstruction(it.Current()); | 483 VerifyUseListsInInstruction(it.Current()); |
490 } | 484 } |
491 } | 485 } |
492 return true; // Return true so we can ASSERT validation. | 486 return true; // Return true so we can ASSERT validation. |
493 } | 487 } |
494 | 488 |
495 | 489 |
496 LivenessAnalysis::LivenessAnalysis( | 490 LivenessAnalysis::LivenessAnalysis( |
497 intptr_t variable_count, | 491 intptr_t variable_count, |
498 const GrowableArray<BlockEntryInstr*>& postorder) | 492 const GrowableArray<BlockEntryInstr*>& postorder) |
499 : zone_(Thread::Current()->zone()), | 493 : zone_(Thread::Current()->zone()), |
500 variable_count_(variable_count), | 494 variable_count_(variable_count), |
501 postorder_(postorder), | 495 postorder_(postorder), |
502 live_out_(postorder.length()), | 496 live_out_(postorder.length()), |
503 kill_(postorder.length()), | 497 kill_(postorder.length()), |
504 live_in_(postorder.length()) { | 498 live_in_(postorder.length()) {} |
505 } | |
506 | 499 |
507 | 500 |
508 bool LivenessAnalysis::UpdateLiveOut(const BlockEntryInstr& block) { | 501 bool LivenessAnalysis::UpdateLiveOut(const BlockEntryInstr& block) { |
509 BitVector* live_out = live_out_[block.postorder_number()]; | 502 BitVector* live_out = live_out_[block.postorder_number()]; |
510 bool changed = false; | 503 bool changed = false; |
511 Instruction* last = block.last_instruction(); | 504 Instruction* last = block.last_instruction(); |
512 ASSERT(last != NULL); | 505 ASSERT(last != NULL); |
513 for (intptr_t i = 0; i < last->SuccessorCount(); i++) { | 506 for (intptr_t i = 0; i < last->SuccessorCount(); i++) { |
514 BlockEntryInstr* succ = last->SuccessorAt(i); | 507 BlockEntryInstr* succ = last->SuccessorAt(i); |
515 ASSERT(succ != NULL); | 508 ASSERT(succ != NULL); |
(...skipping 30 matching lines...) Expand all Loading... |
546 changed = true; | 539 changed = true; |
547 } | 540 } |
548 } | 541 } |
549 } while (changed); | 542 } while (changed); |
550 } | 543 } |
551 | 544 |
552 | 545 |
553 void LivenessAnalysis::Analyze() { | 546 void LivenessAnalysis::Analyze() { |
554 const intptr_t block_count = postorder_.length(); | 547 const intptr_t block_count = postorder_.length(); |
555 for (intptr_t i = 0; i < block_count; i++) { | 548 for (intptr_t i = 0; i < block_count; i++) { |
556 live_out_.Add(new(zone()) BitVector(zone(), variable_count_)); | 549 live_out_.Add(new (zone()) BitVector(zone(), variable_count_)); |
557 kill_.Add(new(zone()) BitVector(zone(), variable_count_)); | 550 kill_.Add(new (zone()) BitVector(zone(), variable_count_)); |
558 live_in_.Add(new(zone()) BitVector(zone(), variable_count_)); | 551 live_in_.Add(new (zone()) BitVector(zone(), variable_count_)); |
559 } | 552 } |
560 | 553 |
561 ComputeInitialSets(); | 554 ComputeInitialSets(); |
562 ComputeLiveInAndLiveOutSets(); | 555 ComputeLiveInAndLiveOutSets(); |
563 } | 556 } |
564 | 557 |
565 | 558 |
566 static void PrintBitVector(const char* tag, BitVector* v) { | 559 static void PrintBitVector(const char* tag, BitVector* v) { |
567 OS::Print("%s:", tag); | 560 OS::Print("%s:", tag); |
568 for (BitVector::Iterator it(v); !it.Done(); it.Advance()) { | 561 for (BitVector::Iterator it(v); !it.Done(); it.Advance()) { |
(...skipping 23 matching lines...) Expand all Loading... |
592 } | 585 } |
593 | 586 |
594 | 587 |
595 // Computes liveness information for local variables. | 588 // Computes liveness information for local variables. |
596 class VariableLivenessAnalysis : public LivenessAnalysis { | 589 class VariableLivenessAnalysis : public LivenessAnalysis { |
597 public: | 590 public: |
598 explicit VariableLivenessAnalysis(FlowGraph* flow_graph) | 591 explicit VariableLivenessAnalysis(FlowGraph* flow_graph) |
599 : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()), | 592 : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()), |
600 flow_graph_(flow_graph), | 593 flow_graph_(flow_graph), |
601 num_non_copied_params_(flow_graph->num_non_copied_params()), | 594 num_non_copied_params_(flow_graph->num_non_copied_params()), |
602 assigned_vars_() { } | 595 assigned_vars_() {} |
603 | 596 |
604 // For every block (in preorder) compute and return set of variables that | 597 // For every block (in preorder) compute and return set of variables that |
605 // have new assigned values flowing out of that block. | 598 // have new assigned values flowing out of that block. |
606 const GrowableArray<BitVector*>& ComputeAssignedVars() { | 599 const GrowableArray<BitVector*>& ComputeAssignedVars() { |
607 // We can't directly return kill_ because it uses postorder numbering while | 600 // We can't directly return kill_ because it uses postorder numbering while |
608 // SSA construction uses preorder numbering internally. | 601 // SSA construction uses preorder numbering internally. |
609 // We have to permute postorder into preorder. | 602 // We have to permute postorder into preorder. |
610 assigned_vars_.Clear(); | 603 assigned_vars_.Clear(); |
611 | 604 |
612 const intptr_t block_count = flow_graph_->preorder().length(); | 605 const intptr_t block_count = flow_graph_->preorder().length(); |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
665 | 658 |
666 const FlowGraph* flow_graph_; | 659 const FlowGraph* flow_graph_; |
667 const intptr_t num_non_copied_params_; | 660 const intptr_t num_non_copied_params_; |
668 GrowableArray<BitVector*> assigned_vars_; | 661 GrowableArray<BitVector*> assigned_vars_; |
669 }; | 662 }; |
670 | 663 |
671 | 664 |
672 void VariableLivenessAnalysis::ComputeInitialSets() { | 665 void VariableLivenessAnalysis::ComputeInitialSets() { |
673 const intptr_t block_count = postorder_.length(); | 666 const intptr_t block_count = postorder_.length(); |
674 | 667 |
675 BitVector* last_loads = new(zone()) BitVector(zone(), variable_count_); | 668 BitVector* last_loads = new (zone()) BitVector(zone(), variable_count_); |
676 for (intptr_t i = 0; i < block_count; i++) { | 669 for (intptr_t i = 0; i < block_count; i++) { |
677 BlockEntryInstr* block = postorder_[i]; | 670 BlockEntryInstr* block = postorder_[i]; |
678 | 671 |
679 BitVector* kill = kill_[i]; | 672 BitVector* kill = kill_[i]; |
680 BitVector* live_in = live_in_[i]; | 673 BitVector* live_in = live_in_[i]; |
681 last_loads->Clear(); | 674 last_loads->Clear(); |
682 | 675 |
683 // There is an implicit use (load-local) of every local variable at each | 676 // There is an implicit use (load-local) of every local variable at each |
684 // call inside a try{} block and every call has an implicit control-flow | 677 // call inside a try{} block and every call has an implicit control-flow |
685 // to the catch entry. As an approximation we mark all locals as live | 678 // to the catch entry. As an approximation we mark all locals as live |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
736 ASSERT((next_virtual_register_number == 0) || (inlining_parameters != NULL)); | 729 ASSERT((next_virtual_register_number == 0) || (inlining_parameters != NULL)); |
737 current_ssa_temp_index_ = next_virtual_register_number; | 730 current_ssa_temp_index_ = next_virtual_register_number; |
738 GrowableArray<BitVector*> dominance_frontier; | 731 GrowableArray<BitVector*> dominance_frontier; |
739 ComputeDominators(&dominance_frontier); | 732 ComputeDominators(&dominance_frontier); |
740 | 733 |
741 VariableLivenessAnalysis variable_liveness(this); | 734 VariableLivenessAnalysis variable_liveness(this); |
742 variable_liveness.Analyze(); | 735 variable_liveness.Analyze(); |
743 | 736 |
744 GrowableArray<PhiInstr*> live_phis; | 737 GrowableArray<PhiInstr*> live_phis; |
745 | 738 |
746 InsertPhis(preorder_, | 739 InsertPhis(preorder_, variable_liveness.ComputeAssignedVars(), |
747 variable_liveness.ComputeAssignedVars(), | 740 dominance_frontier, &live_phis); |
748 dominance_frontier, | |
749 &live_phis); | |
750 | 741 |
751 | 742 |
752 // Rename uses to reference inserted phis where appropriate. | 743 // Rename uses to reference inserted phis where appropriate. |
753 // Collect phis that reach a non-environment use. | 744 // Collect phis that reach a non-environment use. |
754 Rename(&live_phis, &variable_liveness, inlining_parameters); | 745 Rename(&live_phis, &variable_liveness, inlining_parameters); |
755 | 746 |
756 // Propagate alive mark transitively from alive phis and then remove | 747 // Propagate alive mark transitively from alive phis and then remove |
757 // non-live ones. | 748 // non-live ones. |
758 RemoveDeadPhis(&live_phis); | 749 RemoveDeadPhis(&live_phis); |
759 } | 750 } |
(...skipping 13 matching lines...) Expand all Loading... |
773 // that eliminates a pass by using nearest-common ancestor (NCA) to | 764 // that eliminates a pass by using nearest-common ancestor (NCA) to |
774 // compute immediate dominators from semidominators. It also removes a | 765 // compute immediate dominators from semidominators. It also removes a |
775 // level of indirection in the link-eval forest data structure. | 766 // level of indirection in the link-eval forest data structure. |
776 // | 767 // |
777 // The algorithm is described in Georgiadis, Tarjan, and Werneck's | 768 // The algorithm is described in Georgiadis, Tarjan, and Werneck's |
778 // "Finding Dominators in Practice". | 769 // "Finding Dominators in Practice". |
779 // See http://www.cs.princeton.edu/~rwerneck/dominators/ . | 770 // See http://www.cs.princeton.edu/~rwerneck/dominators/ . |
780 | 771 |
781 // All arrays are maps between preorder basic-block numbers. | 772 // All arrays are maps between preorder basic-block numbers. |
782 intptr_t size = parent_.length(); | 773 intptr_t size = parent_.length(); |
783 GrowableArray<intptr_t> idom(size); // Immediate dominator. | 774 GrowableArray<intptr_t> idom(size); // Immediate dominator. |
784 GrowableArray<intptr_t> semi(size); // Semidominator. | 775 GrowableArray<intptr_t> semi(size); // Semidominator. |
785 GrowableArray<intptr_t> label(size); // Label for link-eval forest. | 776 GrowableArray<intptr_t> label(size); // Label for link-eval forest. |
786 | 777 |
787 // 1. First pass: compute semidominators as in Lengauer-Tarjan. | 778 // 1. First pass: compute semidominators as in Lengauer-Tarjan. |
788 // Semidominators are computed from a depth-first spanning tree and are an | 779 // Semidominators are computed from a depth-first spanning tree and are an |
789 // approximation of immediate dominators. | 780 // approximation of immediate dominators. |
790 | 781 |
791 // Use a link-eval data structure with path compression. Implement path | 782 // Use a link-eval data structure with path compression. Implement path |
792 // compression in place by mutating the parent array. Each block has a | 783 // compression in place by mutating the parent array. Each block has a |
793 // label, which is the minimum block number on the compressed path. | 784 // label, which is the minimum block number on the compressed path. |
794 | 785 |
795 // Initialize idom, semi, and label used by SEMI-NCA. Initialize the | 786 // Initialize idom, semi, and label used by SEMI-NCA. Initialize the |
796 // dominance frontier output array. | 787 // dominance frontier output array. |
797 for (intptr_t i = 0; i < size; ++i) { | 788 for (intptr_t i = 0; i < size; ++i) { |
798 idom.Add(parent_[i]); | 789 idom.Add(parent_[i]); |
799 semi.Add(i); | 790 semi.Add(i); |
800 label.Add(i); | 791 label.Add(i); |
801 dominance_frontier->Add(new(zone()) BitVector(zone(), size)); | 792 dominance_frontier->Add(new (zone()) BitVector(zone(), size)); |
802 } | 793 } |
803 | 794 |
804 // Loop over the blocks in reverse preorder (not including the graph | 795 // Loop over the blocks in reverse preorder (not including the graph |
805 // entry). Clear the dominated blocks in the graph entry in case | 796 // entry). Clear the dominated blocks in the graph entry in case |
806 // ComputeDominators is used to recompute them. | 797 // ComputeDominators is used to recompute them. |
807 preorder_[0]->ClearDominatedBlocks(); | 798 preorder_[0]->ClearDominatedBlocks(); |
808 for (intptr_t block_index = size - 1; block_index >= 1; --block_index) { | 799 for (intptr_t block_index = size - 1; block_index >= 1; --block_index) { |
809 // Loop over the predecessors. | 800 // Loop over the predecessors. |
810 BlockEntryInstr* block = preorder_[block_index]; | 801 BlockEntryInstr* block = preorder_[block_index]; |
811 // Clear the immediately dominated blocks in case ComputeDominators is | 802 // Clear the immediately dominated blocks in case ComputeDominators is |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
870 intptr_t next_index = (*parent)[current_index]; | 861 intptr_t next_index = (*parent)[current_index]; |
871 if (next_index > start_index) { | 862 if (next_index > start_index) { |
872 CompressPath(start_index, next_index, parent, label); | 863 CompressPath(start_index, next_index, parent, label); |
873 (*label)[current_index] = | 864 (*label)[current_index] = |
874 Utils::Minimum((*label)[current_index], (*label)[next_index]); | 865 Utils::Minimum((*label)[current_index], (*label)[next_index]); |
875 (*parent)[current_index] = (*parent)[next_index]; | 866 (*parent)[current_index] = (*parent)[next_index]; |
876 } | 867 } |
877 } | 868 } |
878 | 869 |
879 | 870 |
880 void FlowGraph::InsertPhis( | 871 void FlowGraph::InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder, |
881 const GrowableArray<BlockEntryInstr*>& preorder, | 872 const GrowableArray<BitVector*>& assigned_vars, |
882 const GrowableArray<BitVector*>& assigned_vars, | 873 const GrowableArray<BitVector*>& dom_frontier, |
883 const GrowableArray<BitVector*>& dom_frontier, | 874 GrowableArray<PhiInstr*>* live_phis) { |
884 GrowableArray<PhiInstr*>* live_phis) { | |
885 const intptr_t block_count = preorder.length(); | 875 const intptr_t block_count = preorder.length(); |
886 // Map preorder block number to the highest variable index that has a phi | 876 // Map preorder block number to the highest variable index that has a phi |
887 // in that block. Use it to avoid inserting multiple phis for the same | 877 // in that block. Use it to avoid inserting multiple phis for the same |
888 // variable. | 878 // variable. |
889 GrowableArray<intptr_t> has_already(block_count); | 879 GrowableArray<intptr_t> has_already(block_count); |
890 // Map preorder block number to the highest variable index for which the | 880 // Map preorder block number to the highest variable index for which the |
891 // block went on the worklist. Use it to avoid adding the same block to | 881 // block went on the worklist. Use it to avoid adding the same block to |
892 // the worklist more than once for the same variable. | 882 // the worklist more than once for the same variable. |
893 GrowableArray<intptr_t> work(block_count); | 883 GrowableArray<intptr_t> work(block_count); |
894 | 884 |
895 // Initialize has_already and work. | 885 // Initialize has_already and work. |
896 for (intptr_t block_index = 0; block_index < block_count; ++block_index) { | 886 for (intptr_t block_index = 0; block_index < block_count; ++block_index) { |
897 has_already.Add(-1); | 887 has_already.Add(-1); |
898 work.Add(-1); | 888 work.Add(-1); |
899 } | 889 } |
900 | 890 |
901 // Insert phis for each variable in turn. | 891 // Insert phis for each variable in turn. |
902 GrowableArray<BlockEntryInstr*> worklist; | 892 GrowableArray<BlockEntryInstr*> worklist; |
903 for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) { | 893 for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) { |
904 const bool always_live = !FLAG_prune_dead_locals || | 894 const bool always_live = |
905 (var_index == CurrentContextEnvIndex()); | 895 !FLAG_prune_dead_locals || (var_index == CurrentContextEnvIndex()); |
906 // Add to the worklist each block containing an assignment. | 896 // Add to the worklist each block containing an assignment. |
907 for (intptr_t block_index = 0; block_index < block_count; ++block_index) { | 897 for (intptr_t block_index = 0; block_index < block_count; ++block_index) { |
908 if (assigned_vars[block_index]->Contains(var_index)) { | 898 if (assigned_vars[block_index]->Contains(var_index)) { |
909 work[block_index] = var_index; | 899 work[block_index] = var_index; |
910 worklist.Add(preorder[block_index]); | 900 worklist.Add(preorder[block_index]); |
911 } | 901 } |
912 } | 902 } |
913 | 903 |
914 while (!worklist.is_empty()) { | 904 while (!worklist.is_empty()) { |
915 BlockEntryInstr* current = worklist.RemoveLast(); | 905 BlockEntryInstr* current = worklist.RemoveLast(); |
916 // Ensure a phi for each block in the dominance frontier of current. | 906 // Ensure a phi for each block in the dominance frontier of current. |
917 for (BitVector::Iterator it(dom_frontier[current->preorder_number()]); | 907 for (BitVector::Iterator it(dom_frontier[current->preorder_number()]); |
918 !it.Done(); | 908 !it.Done(); it.Advance()) { |
919 it.Advance()) { | |
920 int index = it.Current(); | 909 int index = it.Current(); |
921 if (has_already[index] < var_index) { | 910 if (has_already[index] < var_index) { |
922 BlockEntryInstr* block = preorder[index]; | 911 BlockEntryInstr* block = preorder[index]; |
923 ASSERT(block->IsJoinEntry()); | 912 ASSERT(block->IsJoinEntry()); |
924 PhiInstr* phi = block->AsJoinEntry()->InsertPhi(var_index, | 913 PhiInstr* phi = |
925 variable_count()); | 914 block->AsJoinEntry()->InsertPhi(var_index, variable_count()); |
926 if (always_live) { | 915 if (always_live) { |
927 phi->mark_alive(); | 916 phi->mark_alive(); |
928 live_phis->Add(phi); | 917 live_phis->Add(phi); |
929 } | 918 } |
930 has_already[index] = var_index; | 919 has_already[index] = var_index; |
931 if (work[index] < var_index) { | 920 if (work[index] < var_index) { |
932 work[index] = var_index; | 921 work[index] = var_index; |
933 worklist.Add(block); | 922 worklist.Add(block); |
934 } | 923 } |
935 } | 924 } |
936 } | 925 } |
937 } | 926 } |
938 } | 927 } |
939 } | 928 } |
940 | 929 |
941 | 930 |
942 void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis, | 931 void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis, |
943 VariableLivenessAnalysis* variable_liveness, | 932 VariableLivenessAnalysis* variable_liveness, |
944 ZoneGrowableArray<Definition*>* inlining_parameters) { | 933 ZoneGrowableArray<Definition*>* inlining_parameters) { |
945 GraphEntryInstr* entry = graph_entry(); | 934 GraphEntryInstr* entry = graph_entry(); |
946 | 935 |
947 // Initial renaming environment. | 936 // Initial renaming environment. |
948 GrowableArray<Definition*> env(variable_count()); | 937 GrowableArray<Definition*> env(variable_count()); |
949 | 938 |
950 // Add global constants to the initial definitions. | 939 // Add global constants to the initial definitions. |
951 constant_null_ = GetConstant(Object::ZoneHandle()); | 940 constant_null_ = GetConstant(Object::ZoneHandle()); |
952 constant_dead_ = GetConstant(Symbols::OptimizedOut()); | 941 constant_dead_ = GetConstant(Symbols::OptimizedOut()); |
953 constant_empty_context_ = GetConstant(Context::Handle( | 942 constant_empty_context_ = |
954 isolate()->object_store()->empty_context())); | 943 GetConstant(Context::Handle(isolate()->object_store()->empty_context())); |
955 | 944 |
956 // Add parameters to the initial definitions and renaming environment. | 945 // Add parameters to the initial definitions and renaming environment. |
957 if (inlining_parameters != NULL) { | 946 if (inlining_parameters != NULL) { |
958 // Use known parameters. | 947 // Use known parameters. |
959 ASSERT(parameter_count() == inlining_parameters->length()); | 948 ASSERT(parameter_count() == inlining_parameters->length()); |
960 for (intptr_t i = 0; i < parameter_count(); ++i) { | 949 for (intptr_t i = 0; i < parameter_count(); ++i) { |
961 Definition* defn = (*inlining_parameters)[i]; | 950 Definition* defn = (*inlining_parameters)[i]; |
962 AllocateSSAIndexes(defn); | 951 AllocateSSAIndexes(defn); |
963 AddToInitialDefinitions(defn); | 952 AddToInitialDefinitions(defn); |
964 env.Add(defn); | 953 env.Add(defn); |
965 } | 954 } |
966 } else { | 955 } else { |
967 // Create new parameters. For functions compiled for OSR, the locals | 956 // Create new parameters. For functions compiled for OSR, the locals |
968 // are unknown and so treated like parameters. | 957 // are unknown and so treated like parameters. |
969 intptr_t count = IsCompiledForOsr() ? variable_count() : parameter_count(); | 958 intptr_t count = IsCompiledForOsr() ? variable_count() : parameter_count(); |
970 for (intptr_t i = 0; i < count; ++i) { | 959 for (intptr_t i = 0; i < count; ++i) { |
971 ParameterInstr* param = new(zone()) ParameterInstr(i, entry); | 960 ParameterInstr* param = new (zone()) ParameterInstr(i, entry); |
972 param->set_ssa_temp_index(alloc_ssa_temp_index()); // New SSA temp. | 961 param->set_ssa_temp_index(alloc_ssa_temp_index()); // New SSA temp. |
973 AddToInitialDefinitions(param); | 962 AddToInitialDefinitions(param); |
974 env.Add(param); | 963 env.Add(param); |
975 } | 964 } |
976 } | 965 } |
977 | 966 |
978 // Initialize all locals in the renaming environment For OSR, the locals have | 967 // Initialize all locals in the renaming environment For OSR, the locals have |
979 // already been handled as parameters. | 968 // already been handled as parameters. |
980 if (!IsCompiledForOsr()) { | 969 if (!IsCompiledForOsr()) { |
981 for (intptr_t i = parameter_count(); i < variable_count(); ++i) { | 970 for (intptr_t i = parameter_count(); i < variable_count(); ++i) { |
(...skipping 18 matching lines...) Expand all Loading... |
1000 // on entry to the catch. | 989 // on entry to the catch. |
1001 entry->set_fixed_slot_count(num_stack_locals() + num_copied_params()); | 990 entry->set_fixed_slot_count(num_stack_locals() + num_copied_params()); |
1002 } | 991 } |
1003 RenameRecursive(entry, &env, live_phis, variable_liveness); | 992 RenameRecursive(entry, &env, live_phis, variable_liveness); |
1004 } | 993 } |
1005 | 994 |
1006 | 995 |
1007 void FlowGraph::AttachEnvironment(Instruction* instr, | 996 void FlowGraph::AttachEnvironment(Instruction* instr, |
1008 GrowableArray<Definition*>* env) { | 997 GrowableArray<Definition*>* env) { |
1009 Environment* deopt_env = | 998 Environment* deopt_env = |
1010 Environment::From(zone(), | 999 Environment::From(zone(), *env, num_non_copied_params_, parsed_function_); |
1011 *env, | |
1012 num_non_copied_params_, | |
1013 parsed_function_); | |
1014 if (instr->IsClosureCall()) { | 1000 if (instr->IsClosureCall()) { |
1015 deopt_env = deopt_env->DeepCopy(zone(), | 1001 deopt_env = |
1016 deopt_env->Length() - instr->InputCount()); | 1002 deopt_env->DeepCopy(zone(), deopt_env->Length() - instr->InputCount()); |
1017 } | 1003 } |
1018 instr->SetEnvironment(deopt_env); | 1004 instr->SetEnvironment(deopt_env); |
1019 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) { | 1005 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) { |
1020 Value* use = it.CurrentValue(); | 1006 Value* use = it.CurrentValue(); |
1021 use->definition()->AddEnvUse(use); | 1007 use->definition()->AddEnvUse(use); |
1022 } | 1008 } |
1023 if (instr->CanDeoptimize()) { | 1009 if (instr->CanDeoptimize()) { |
1024 instr->env()->set_deopt_id(instr->deopt_id()); | 1010 instr->env()->set_deopt_id(instr->deopt_id()); |
1025 } | 1011 } |
1026 } | 1012 } |
(...skipping 20 matching lines...) Expand all Loading... |
1047 // more redundant phis. | 1033 // more redundant phis. |
1048 phi->mark_alive(); | 1034 phi->mark_alive(); |
1049 live_phis->Add(phi); | 1035 live_phis->Add(phi); |
1050 } | 1036 } |
1051 } | 1037 } |
1052 } | 1038 } |
1053 } | 1039 } |
1054 } else if (block_entry->IsCatchBlockEntry()) { | 1040 } else if (block_entry->IsCatchBlockEntry()) { |
1055 // Add real definitions for all locals and parameters. | 1041 // Add real definitions for all locals and parameters. |
1056 for (intptr_t i = 0; i < env->length(); ++i) { | 1042 for (intptr_t i = 0; i < env->length(); ++i) { |
1057 ParameterInstr* param = new(zone()) ParameterInstr(i, block_entry); | 1043 ParameterInstr* param = new (zone()) ParameterInstr(i, block_entry); |
1058 param->set_ssa_temp_index(alloc_ssa_temp_index()); // New SSA temp. | 1044 param->set_ssa_temp_index(alloc_ssa_temp_index()); // New SSA temp. |
1059 (*env)[i] = param; | 1045 (*env)[i] = param; |
1060 block_entry->AsCatchBlockEntry()->initial_definitions()->Add(param); | 1046 block_entry->AsCatchBlockEntry()->initial_definitions()->Add(param); |
1061 } | 1047 } |
1062 } | 1048 } |
1063 | 1049 |
1064 // Prune non-live variables at block entry by replacing their environment | 1050 // Prune non-live variables at block entry by replacing their environment |
1065 // slots with null. | 1051 // slots with null. |
1066 BitVector* live_in = variable_liveness->GetLiveInSet(block_entry); | 1052 BitVector* live_in = variable_liveness->GetLiveInSet(block_entry); |
1067 for (intptr_t i = 0; i < variable_count(); i++) { | 1053 for (intptr_t i = 0; i < variable_count(); i++) { |
1068 // TODO(fschneider): Make sure that live_in always contains the | 1054 // TODO(fschneider): Make sure that live_in always contains the |
1069 // CurrentContext variable to avoid the special case here. | 1055 // CurrentContext variable to avoid the special case here. |
1070 if (FLAG_prune_dead_locals && | 1056 if (FLAG_prune_dead_locals && !live_in->Contains(i) && |
1071 !live_in->Contains(i) && | |
1072 (i != CurrentContextEnvIndex())) { | 1057 (i != CurrentContextEnvIndex())) { |
1073 (*env)[i] = constant_dead(); | 1058 (*env)[i] = constant_dead(); |
1074 } | 1059 } |
1075 } | 1060 } |
1076 | 1061 |
1077 // Attach environment to the block entry. | 1062 // Attach environment to the block entry. |
1078 AttachEnvironment(block_entry, env); | 1063 AttachEnvironment(block_entry, env); |
1079 | 1064 |
1080 // 2. Process normal instructions. | 1065 // 2. Process normal instructions. |
1081 for (ForwardInstructionIterator it(block_entry); !it.Done(); it.Advance()) { | 1066 for (ForwardInstructionIterator it(block_entry); !it.Done(); it.Advance()) { |
(...skipping 10 matching lines...) Expand all Loading... |
1092 // For each use of a LoadLocal, StoreLocal, DropTemps or Constant: Replace | 1077 // For each use of a LoadLocal, StoreLocal, DropTemps or Constant: Replace |
1093 // it with the renamed value. | 1078 // it with the renamed value. |
1094 for (intptr_t i = current->InputCount() - 1; i >= 0; --i) { | 1079 for (intptr_t i = current->InputCount() - 1; i >= 0; --i) { |
1095 Value* v = current->InputAt(i); | 1080 Value* v = current->InputAt(i); |
1096 // Update expression stack. | 1081 // Update expression stack. |
1097 ASSERT(env->length() > variable_count()); | 1082 ASSERT(env->length() > variable_count()); |
1098 | 1083 |
1099 Definition* reaching_defn = env->RemoveLast(); | 1084 Definition* reaching_defn = env->RemoveLast(); |
1100 Definition* input_defn = v->definition(); | 1085 Definition* input_defn = v->definition(); |
1101 if (input_defn != reaching_defn) { | 1086 if (input_defn != reaching_defn) { |
1102 ASSERT(input_defn->IsLoadLocal() || | 1087 ASSERT(input_defn->IsLoadLocal() || input_defn->IsStoreLocal() || |
1103 input_defn->IsStoreLocal() || | 1088 input_defn->IsDropTemps() || input_defn->IsConstant()); |
1104 input_defn->IsDropTemps() || | |
1105 input_defn->IsConstant()); | |
1106 // Assert we are not referencing nulls in the initial environment. | 1089 // Assert we are not referencing nulls in the initial environment. |
1107 ASSERT(reaching_defn->ssa_temp_index() != -1); | 1090 ASSERT(reaching_defn->ssa_temp_index() != -1); |
1108 v->set_definition(reaching_defn); | 1091 v->set_definition(reaching_defn); |
1109 input_defn = reaching_defn; | 1092 input_defn = reaching_defn; |
1110 } | 1093 } |
1111 input_defn->AddInputUse(v); | 1094 input_defn->AddInputUse(v); |
1112 } | 1095 } |
1113 | 1096 |
1114 // Drop pushed arguments for calls. | 1097 // Drop pushed arguments for calls. |
1115 for (intptr_t j = 0; j < current->ArgumentCount(); j++) { | 1098 for (intptr_t j = 0; j < current->ArgumentCount(); j++) { |
1116 env->RemoveLast(); | 1099 env->RemoveLast(); |
1117 } | 1100 } |
1118 | 1101 |
1119 // 2b. Handle LoadLocal, StoreLocal, DropTemps and Constant. | 1102 // 2b. Handle LoadLocal, StoreLocal, DropTemps and Constant. |
1120 Definition* definition = current->AsDefinition(); | 1103 Definition* definition = current->AsDefinition(); |
1121 if (definition != NULL) { | 1104 if (definition != NULL) { |
1122 LoadLocalInstr* load = definition->AsLoadLocal(); | 1105 LoadLocalInstr* load = definition->AsLoadLocal(); |
1123 StoreLocalInstr* store = definition->AsStoreLocal(); | 1106 StoreLocalInstr* store = definition->AsStoreLocal(); |
1124 DropTempsInstr* drop = definition->AsDropTemps(); | 1107 DropTempsInstr* drop = definition->AsDropTemps(); |
1125 ConstantInstr* constant = definition->AsConstant(); | 1108 ConstantInstr* constant = definition->AsConstant(); |
1126 if ((load != NULL) || | 1109 if ((load != NULL) || (store != NULL) || (drop != NULL) || |
1127 (store != NULL) || | |
1128 (drop != NULL) || | |
1129 (constant != NULL)) { | 1110 (constant != NULL)) { |
1130 Definition* result = NULL; | 1111 Definition* result = NULL; |
1131 if (store != NULL) { | 1112 if (store != NULL) { |
1132 // Update renaming environment. | 1113 // Update renaming environment. |
1133 intptr_t index = store->local().BitIndexIn(num_non_copied_params_); | 1114 intptr_t index = store->local().BitIndexIn(num_non_copied_params_); |
1134 result = store->value()->definition(); | 1115 result = store->value()->definition(); |
1135 | 1116 |
1136 if (!FLAG_prune_dead_locals || | 1117 if (!FLAG_prune_dead_locals || |
1137 variable_liveness->IsStoreAlive(block_entry, store)) { | 1118 variable_liveness->IsStoreAlive(block_entry, store)) { |
1138 (*env)[index] = result; | 1119 (*env)[index] = result; |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1213 block_entry->last_instruction()->SuccessorAt(0)->IsJoinEntry()) { | 1194 block_entry->last_instruction()->SuccessorAt(0)->IsJoinEntry()) { |
1214 JoinEntryInstr* successor = | 1195 JoinEntryInstr* successor = |
1215 block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry(); | 1196 block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry(); |
1216 intptr_t pred_index = successor->IndexOfPredecessor(block_entry); | 1197 intptr_t pred_index = successor->IndexOfPredecessor(block_entry); |
1217 ASSERT(pred_index >= 0); | 1198 ASSERT(pred_index >= 0); |
1218 if (successor->phis() != NULL) { | 1199 if (successor->phis() != NULL) { |
1219 for (intptr_t i = 0; i < successor->phis()->length(); ++i) { | 1200 for (intptr_t i = 0; i < successor->phis()->length(); ++i) { |
1220 PhiInstr* phi = (*successor->phis())[i]; | 1201 PhiInstr* phi = (*successor->phis())[i]; |
1221 if (phi != NULL) { | 1202 if (phi != NULL) { |
1222 // Rename input operand. | 1203 // Rename input operand. |
1223 Value* use = new(zone()) Value((*env)[i]); | 1204 Value* use = new (zone()) Value((*env)[i]); |
1224 phi->SetInputAt(pred_index, use); | 1205 phi->SetInputAt(pred_index, use); |
1225 } | 1206 } |
1226 } | 1207 } |
1227 } | 1208 } |
1228 } | 1209 } |
1229 } | 1210 } |
1230 | 1211 |
1231 | 1212 |
1232 void FlowGraph::RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis) { | 1213 void FlowGraph::RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis) { |
1233 // Augment live_phis with those that have implicit real used at | 1214 // Augment live_phis with those that have implicit real used at |
1234 // potentially throwing instructions if there is a try-catch in this graph. | 1215 // potentially throwing instructions if there is a try-catch in this graph. |
1235 if (graph_entry()->SuccessorCount() > 1) { | 1216 if (graph_entry()->SuccessorCount() > 1) { |
1236 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { | 1217 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { |
1237 JoinEntryInstr* join = it.Current()->AsJoinEntry(); | 1218 JoinEntryInstr* join = it.Current()->AsJoinEntry(); |
1238 if (join == NULL) continue; | 1219 if (join == NULL) continue; |
1239 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { | 1220 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) { |
1240 PhiInstr* phi = phi_it.Current(); | 1221 PhiInstr* phi = phi_it.Current(); |
1241 if (phi == NULL || | 1222 if (phi == NULL || phi->is_alive() || (phi->input_use_list() != NULL) || |
1242 phi->is_alive() || | |
1243 (phi->input_use_list() != NULL) || | |
1244 (phi->env_use_list() == NULL)) { | 1223 (phi->env_use_list() == NULL)) { |
1245 continue; | 1224 continue; |
1246 } | 1225 } |
1247 for (Value::Iterator it(phi->env_use_list()); | 1226 for (Value::Iterator it(phi->env_use_list()); !it.Done(); |
1248 !it.Done(); | |
1249 it.Advance()) { | 1227 it.Advance()) { |
1250 Value* use = it.Current(); | 1228 Value* use = it.Current(); |
1251 if (use->instruction()->MayThrow() && | 1229 if (use->instruction()->MayThrow() && |
1252 use->instruction()->GetBlock()->InsideTryBlock()) { | 1230 use->instruction()->GetBlock()->InsideTryBlock()) { |
1253 live_phis->Add(phi); | 1231 live_phis->Add(phi); |
1254 phi->mark_alive(); | 1232 phi->mark_alive(); |
1255 break; | 1233 break; |
1256 } | 1234 } |
1257 } | 1235 } |
1258 } | 1236 } |
(...skipping 14 matching lines...) Expand all Loading... |
1273 | 1251 |
1274 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { | 1252 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) { |
1275 JoinEntryInstr* join = it.Current()->AsJoinEntry(); | 1253 JoinEntryInstr* join = it.Current()->AsJoinEntry(); |
1276 if (join != NULL) join->RemoveDeadPhis(constant_dead()); | 1254 if (join != NULL) join->RemoveDeadPhis(constant_dead()); |
1277 } | 1255 } |
1278 } | 1256 } |
1279 | 1257 |
1280 | 1258 |
1281 void FlowGraph::RemoveRedefinitions() { | 1259 void FlowGraph::RemoveRedefinitions() { |
1282 // Remove redefinition instructions inserted to inhibit hoisting. | 1260 // Remove redefinition instructions inserted to inhibit hoisting. |
1283 for (BlockIterator block_it = reverse_postorder_iterator(); | 1261 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
1284 !block_it.Done(); | |
1285 block_it.Advance()) { | 1262 block_it.Advance()) { |
1286 for (ForwardInstructionIterator instr_it(block_it.Current()); | 1263 for (ForwardInstructionIterator instr_it(block_it.Current()); |
1287 !instr_it.Done(); | 1264 !instr_it.Done(); instr_it.Advance()) { |
1288 instr_it.Advance()) { | |
1289 RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition(); | 1265 RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition(); |
1290 if (redefinition != NULL) { | 1266 if (redefinition != NULL) { |
1291 Definition* original; | 1267 Definition* original; |
1292 do { | 1268 do { |
1293 original = redefinition->value()->definition(); | 1269 original = redefinition->value()->definition(); |
1294 } while (original->IsRedefinition()); | 1270 } while (original->IsRedefinition()); |
1295 redefinition->ReplaceUsesWith(original); | 1271 redefinition->ReplaceUsesWith(original); |
1296 instr_it.RemoveCurrentFromGraph(); | 1272 instr_it.RemoveCurrentFromGraph(); |
1297 } | 1273 } |
1298 } | 1274 } |
1299 } | 1275 } |
1300 } | 1276 } |
1301 | 1277 |
1302 | 1278 |
1303 // Find the natural loop for the back edge m->n and attach loop information | 1279 // Find the natural loop for the back edge m->n and attach loop information |
1304 // to block n (loop header). The algorithm is described in "Advanced Compiler | 1280 // to block n (loop header). The algorithm is described in "Advanced Compiler |
1305 // Design & Implementation" (Muchnick) p192. | 1281 // Design & Implementation" (Muchnick) p192. |
1306 BitVector* FlowGraph::FindLoop(BlockEntryInstr* m, BlockEntryInstr* n) const { | 1282 BitVector* FlowGraph::FindLoop(BlockEntryInstr* m, BlockEntryInstr* n) const { |
1307 GrowableArray<BlockEntryInstr*> stack; | 1283 GrowableArray<BlockEntryInstr*> stack; |
1308 BitVector* loop = new(zone()) BitVector(zone(), preorder_.length()); | 1284 BitVector* loop = new (zone()) BitVector(zone(), preorder_.length()); |
1309 | 1285 |
1310 loop->Add(n->preorder_number()); | 1286 loop->Add(n->preorder_number()); |
1311 if (n != m) { | 1287 if (n != m) { |
1312 loop->Add(m->preorder_number()); | 1288 loop->Add(m->preorder_number()); |
1313 stack.Add(m); | 1289 stack.Add(m); |
1314 } | 1290 } |
1315 | 1291 |
1316 while (!stack.is_empty()) { | 1292 while (!stack.is_empty()) { |
1317 BlockEntryInstr* p = stack.RemoveLast(); | 1293 BlockEntryInstr* p = stack.RemoveLast(); |
1318 for (intptr_t i = 0; i < p->PredecessorCount(); ++i) { | 1294 for (intptr_t i = 0; i < p->PredecessorCount(); ++i) { |
1319 BlockEntryInstr* q = p->PredecessorAt(i); | 1295 BlockEntryInstr* q = p->PredecessorAt(i); |
1320 if (!loop->Contains(q->preorder_number())) { | 1296 if (!loop->Contains(q->preorder_number())) { |
1321 loop->Add(q->preorder_number()); | 1297 loop->Add(q->preorder_number()); |
1322 stack.Add(q); | 1298 stack.Add(q); |
1323 } | 1299 } |
1324 } | 1300 } |
1325 } | 1301 } |
1326 return loop; | 1302 return loop; |
1327 } | 1303 } |
1328 | 1304 |
1329 | 1305 |
1330 ZoneGrowableArray<BlockEntryInstr*>* FlowGraph::ComputeLoops() const { | 1306 ZoneGrowableArray<BlockEntryInstr*>* FlowGraph::ComputeLoops() const { |
1331 ZoneGrowableArray<BlockEntryInstr*>* loop_headers = | 1307 ZoneGrowableArray<BlockEntryInstr*>* loop_headers = |
1332 new(zone()) ZoneGrowableArray<BlockEntryInstr*>(); | 1308 new (zone()) ZoneGrowableArray<BlockEntryInstr*>(); |
1333 | 1309 |
1334 for (BlockIterator it = postorder_iterator(); | 1310 for (BlockIterator it = postorder_iterator(); !it.Done(); it.Advance()) { |
1335 !it.Done(); | |
1336 it.Advance()) { | |
1337 BlockEntryInstr* block = it.Current(); | 1311 BlockEntryInstr* block = it.Current(); |
1338 for (intptr_t i = 0; i < block->PredecessorCount(); ++i) { | 1312 for (intptr_t i = 0; i < block->PredecessorCount(); ++i) { |
1339 BlockEntryInstr* pred = block->PredecessorAt(i); | 1313 BlockEntryInstr* pred = block->PredecessorAt(i); |
1340 if (block->Dominates(pred)) { | 1314 if (block->Dominates(pred)) { |
1341 if (FLAG_trace_optimization) { | 1315 if (FLAG_trace_optimization) { |
1342 OS::Print("Back edge B%" Pd " -> B%" Pd "\n", pred->block_id(), | 1316 OS::Print("Back edge B%" Pd " -> B%" Pd "\n", pred->block_id(), |
1343 block->block_id()); | 1317 block->block_id()); |
1344 } | 1318 } |
1345 BitVector* loop_info = FindLoop(pred, block); | 1319 BitVector* loop_info = FindLoop(pred, block); |
1346 // Loops that share the same loop header are treated as one loop. | 1320 // Loops that share the same loop header are treated as one loop. |
(...skipping 10 matching lines...) Expand all Loading... |
1357 block->set_loop_info(loop_info); | 1331 block->set_loop_info(loop_info); |
1358 loop_headers->Add(block); | 1332 loop_headers->Add(block); |
1359 } | 1333 } |
1360 } | 1334 } |
1361 } | 1335 } |
1362 } | 1336 } |
1363 if (FLAG_trace_optimization) { | 1337 if (FLAG_trace_optimization) { |
1364 for (intptr_t i = 0; i < loop_headers->length(); ++i) { | 1338 for (intptr_t i = 0; i < loop_headers->length(); ++i) { |
1365 BlockEntryInstr* header = (*loop_headers)[i]; | 1339 BlockEntryInstr* header = (*loop_headers)[i]; |
1366 OS::Print("Loop header B%" Pd "\n", header->block_id()); | 1340 OS::Print("Loop header B%" Pd "\n", header->block_id()); |
1367 for (BitVector::Iterator it(header->loop_info()); | 1341 for (BitVector::Iterator it(header->loop_info()); !it.Done(); |
1368 !it.Done(); | |
1369 it.Advance()) { | 1342 it.Advance()) { |
1370 OS::Print(" B%" Pd "\n", preorder_[it.Current()]->block_id()); | 1343 OS::Print(" B%" Pd "\n", preorder_[it.Current()]->block_id()); |
1371 } | 1344 } |
1372 } | 1345 } |
1373 } | 1346 } |
1374 return loop_headers; | 1347 return loop_headers; |
1375 } | 1348 } |
1376 | 1349 |
1377 | 1350 |
1378 intptr_t FlowGraph::InstructionCount() const { | 1351 intptr_t FlowGraph::InstructionCount() const { |
1379 intptr_t size = 0; | 1352 intptr_t size = 0; |
1380 // Iterate each block, skipping the graph entry. | 1353 // Iterate each block, skipping the graph entry. |
1381 for (intptr_t i = 1; i < preorder_.length(); ++i) { | 1354 for (intptr_t i = 1; i < preorder_.length(); ++i) { |
1382 for (ForwardInstructionIterator it(preorder_[i]); | 1355 for (ForwardInstructionIterator it(preorder_[i]); !it.Done(); |
1383 !it.Done(); | |
1384 it.Advance()) { | 1356 it.Advance()) { |
1385 ++size; | 1357 ++size; |
1386 } | 1358 } |
1387 } | 1359 } |
1388 return size; | 1360 return size; |
1389 } | 1361 } |
1390 | 1362 |
1391 | 1363 |
1392 void FlowGraph::ComputeBlockEffects() { | 1364 void FlowGraph::ComputeBlockEffects() { |
1393 block_effects_ = new(zone()) BlockEffects(this); | 1365 block_effects_ = new (zone()) BlockEffects(this); |
1394 } | 1366 } |
1395 | 1367 |
1396 | 1368 |
// Computes, for every block B, the set available_at_[B] of blocks whose
// effects are still "available" when control reaches B, i.e. no
// side-effecting instruction intervenes on any path. This backs the
// IsAvailableAt / CanBeMovedTo queries. The computation is an iterative
// forward dataflow fixpoint:
//   available-at(B)    = intersection over preds P of available-after(P)
//   available-after(B) = {B} union (available-at(B) if B has no side effects)
BlockEffects::BlockEffects(FlowGraph* flow_graph)
    : available_at_(flow_graph->postorder().length()) {
  // We are tracking a single effect.
  ASSERT(EffectSet::kLastEffect == 1);
  Zone* zone = flow_graph->zone();
  const intptr_t block_count = flow_graph->postorder().length();

  // Set of blocks that contain side-effects.
  BitVector* kill = new (zone) BitVector(zone, block_count);

  // Per block available-after sets. Block A is available after the block B if
  // and only if A is either equal to B or A is available at B and B contains no
  // side-effects. Initially we consider all blocks available after all other
  // blocks.
  GrowableArray<BitVector*> available_after(block_count);

  // Discover all blocks with side-effects.
  for (BlockIterator it = flow_graph->postorder_iterator(); !it.Done();
       it.Advance()) {
    // Sets start out NULL and are allocated lazily below the first time a
    // block's available-at set changes from "unknown".
    available_at_.Add(NULL);
    available_after.Add(NULL);

    BlockEntryInstr* block = it.Current();
    // NOTE: this inner iterator intentionally shadows the outer block
    // iterator 'it'.
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      if (!it.Current()->Effects().IsNone()) {
        kill->Add(block->postorder_number());
        break;  // One effectful instruction is enough to mark the block.
      }
    }
  }

  // Scratch set reused across iterations to avoid reallocation.
  BitVector* temp = new (zone) BitVector(zone, block_count);

  // Recompute available-at based on predecessors' available-after until the fix
  // point is reached.
  bool changed;
  do {
    changed = false;

    for (BlockIterator it = flow_graph->reverse_postorder_iterator();
         !it.Done(); it.Advance()) {
      BlockEntryInstr* block = it.Current();
      const intptr_t block_num = block->postorder_number();

      if (block->IsGraphEntry()) {
        temp->Clear();  // Nothing is live-in into graph entry.
      } else {
        // Available-at is an intersection of all predecessors' available-after
        // sets.
        temp->SetAll();
        for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
          const intptr_t pred = block->PredecessorAt(i)->postorder_number();
          // A NULL set means "not yet computed" and is treated as the full
          // set (top), so it is skipped from the intersection.
          if (available_after[pred] != NULL) {
            temp->Intersect(available_after[pred]);
          }
        }
      }

      BitVector* current = available_at_[block_num];
      if ((current == NULL) || !current->Equals(*temp)) {
        // Available-at changed: update it and recompute available-after.
        if (available_at_[block_num] == NULL) {
          // First time this block's set changes: allocate both sets.
          current = available_at_[block_num] =
              new (zone) BitVector(zone, block_count);
          available_after[block_num] = new (zone) BitVector(zone, block_count);
          // Block is always available after itself.
          available_after[block_num]->Add(block_num);
        }
        current->CopyFrom(temp);
        // Effects only propagate through the block when it has none itself.
        if (!kill->Contains(block_num)) {
          available_after[block_num]->CopyFrom(temp);
          // Block is always available after itself.
          available_after[block_num]->Add(block_num);
        }
        changed = true;
      }
    }
  } while (changed);
}
1481 | 1448 |
1482 | 1449 |
1483 bool BlockEffects::IsAvailableAt(Instruction* instr, | 1450 bool BlockEffects::IsAvailableAt(Instruction* instr, |
1484 BlockEntryInstr* block) const { | 1451 BlockEntryInstr* block) const { |
1485 return (instr->Dependencies().IsNone()) || | 1452 return (instr->Dependencies().IsNone()) || |
1486 IsSideEffectFreePath(instr->GetBlock(), block); | 1453 IsSideEffectFreePath(instr->GetBlock(), block); |
1487 } | 1454 } |
1488 | 1455 |
1489 | 1456 |
1490 bool BlockEffects::CanBeMovedTo(Instruction* instr, | 1457 bool BlockEffects::CanBeMovedTo(Instruction* instr, |
1491 BlockEntryInstr* block) const { | 1458 BlockEntryInstr* block) const { |
1492 return (instr->Dependencies().IsNone()) || | 1459 return (instr->Dependencies().IsNone()) || |
1493 IsSideEffectFreePath(block, instr->GetBlock()); | 1460 IsSideEffectFreePath(block, instr->GetBlock()); |
1494 } | 1461 } |
1495 | 1462 |
1496 | 1463 |
1497 bool BlockEffects::IsSideEffectFreePath(BlockEntryInstr* from, | 1464 bool BlockEffects::IsSideEffectFreePath(BlockEntryInstr* from, |
1498 BlockEntryInstr* to) const { | 1465 BlockEntryInstr* to) const { |
1499 return available_at_[to->postorder_number()]->Contains( | 1466 return available_at_[to->postorder_number()]->Contains( |
1500 from->postorder_number()); | 1467 from->postorder_number()); |
1501 } | 1468 } |
1502 | 1469 |
1503 | 1470 |
1504 // Quick access to the current zone. | 1471 // Quick access to the current zone. |
1505 #define Z (zone()) | 1472 #define Z (zone()) |
1506 | 1473 |
1507 | 1474 |
1508 void FlowGraph::ConvertUse(Value* use, Representation from_rep) { | 1475 void FlowGraph::ConvertUse(Value* use, Representation from_rep) { |
1509 const Representation to_rep = | 1476 const Representation to_rep = |
1510 use->instruction()->RequiredInputRepresentation(use->use_index()); | 1477 use->instruction()->RequiredInputRepresentation(use->use_index()); |
1511 if (from_rep == to_rep || to_rep == kNoRepresentation) { | 1478 if (from_rep == to_rep || to_rep == kNoRepresentation) { |
1512 return; | 1479 return; |
1513 } | 1480 } |
1514 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false); | 1481 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/false); |
1515 } | 1482 } |
1516 | 1483 |
1517 | 1484 |
1518 static bool IsUnboxedInteger(Representation rep) { | 1485 static bool IsUnboxedInteger(Representation rep) { |
1519 return (rep == kUnboxedInt32) || | 1486 return (rep == kUnboxedInt32) || (rep == kUnboxedUint32) || |
1520 (rep == kUnboxedUint32) || | |
1521 (rep == kUnboxedMint); | 1487 (rep == kUnboxedMint); |
1522 } | 1488 } |
1523 | 1489 |
1524 | 1490 |
// True when the target supports keeping 128-bit SIMD values unboxed, i.e.
// SIMD operations may be inlined instead of boxed/called out.
static bool ShouldInlineSimd() {
  return FlowGraphCompiler::SupportsUnboxedSimd128();
}
1528 | 1494 |
1529 | 1495 |
1530 static bool CanUnboxDouble() { | 1496 static bool CanUnboxDouble() { |
(...skipping 18 matching lines...) Expand all Loading... |
1549 // For phis conversions have to be inserted in the predecessor. | 1515 // For phis conversions have to be inserted in the predecessor. |
1550 insert_before = | 1516 insert_before = |
1551 phi->block()->PredecessorAt(use->use_index())->last_instruction(); | 1517 phi->block()->PredecessorAt(use->use_index())->last_instruction(); |
1552 deopt_target = NULL; | 1518 deopt_target = NULL; |
1553 } else { | 1519 } else { |
1554 deopt_target = insert_before = use->instruction(); | 1520 deopt_target = insert_before = use->instruction(); |
1555 } | 1521 } |
1556 | 1522 |
1557 Definition* converted = NULL; | 1523 Definition* converted = NULL; |
1558 if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { | 1524 if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) { |
1559 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) ? | 1525 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL) |
1560 deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId; | 1526 ? deopt_target->DeoptimizationTarget() |
1561 converted = new(Z) UnboxedIntConverterInstr(from, | 1527 : Thread::kNoDeoptId; |
1562 to, | 1528 converted = new (Z) |
1563 use->CopyWithType(), | 1529 UnboxedIntConverterInstr(from, to, use->CopyWithType(), deopt_id); |
1564 deopt_id); | |
1565 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { | 1530 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) { |
1566 converted = new Int32ToDoubleInstr(use->CopyWithType()); | 1531 converted = new Int32ToDoubleInstr(use->CopyWithType()); |
1567 } else if ((from == kUnboxedMint) && | 1532 } else if ((from == kUnboxedMint) && (to == kUnboxedDouble) && |
1568 (to == kUnboxedDouble) && | |
1569 CanConvertUnboxedMintToDouble()) { | 1533 CanConvertUnboxedMintToDouble()) { |
1570 const intptr_t deopt_id = (deopt_target != NULL) ? | 1534 const intptr_t deopt_id = (deopt_target != NULL) |
1571 deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId; | 1535 ? deopt_target->DeoptimizationTarget() |
| 1536 : Thread::kNoDeoptId; |
1572 ASSERT(CanUnboxDouble()); | 1537 ASSERT(CanUnboxDouble()); |
1573 converted = new MintToDoubleInstr(use->CopyWithType(), deopt_id); | 1538 converted = new MintToDoubleInstr(use->CopyWithType(), deopt_id); |
1574 } else if ((from == kTagged) && Boxing::Supports(to)) { | 1539 } else if ((from == kTagged) && Boxing::Supports(to)) { |
1575 const intptr_t deopt_id = (deopt_target != NULL) ? | 1540 const intptr_t deopt_id = (deopt_target != NULL) |
1576 deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId; | 1541 ? deopt_target->DeoptimizationTarget() |
| 1542 : Thread::kNoDeoptId; |
1577 converted = UnboxInstr::Create(to, use->CopyWithType(), deopt_id); | 1543 converted = UnboxInstr::Create(to, use->CopyWithType(), deopt_id); |
1578 } else if ((to == kTagged) && Boxing::Supports(from)) { | 1544 } else if ((to == kTagged) && Boxing::Supports(from)) { |
1579 converted = BoxInstr::Create(from, use->CopyWithType()); | 1545 converted = BoxInstr::Create(from, use->CopyWithType()); |
1580 } else { | 1546 } else { |
1581 // We have failed to find a suitable conversion instruction. | 1547 // We have failed to find a suitable conversion instruction. |
1582 // Insert two "dummy" conversion instructions with the correct | 1548 // Insert two "dummy" conversion instructions with the correct |
1583 // "from" and "to" representation. The inserted instructions will | 1549 // "from" and "to" representation. The inserted instructions will |
1584 // trigger a deoptimization if executed. See #12417 for a discussion. | 1550 // trigger a deoptimization if executed. See #12417 for a discussion. |
1585 const intptr_t deopt_id = (deopt_target != NULL) ? | 1551 const intptr_t deopt_id = (deopt_target != NULL) |
1586 deopt_target->DeoptimizationTarget() : Thread::kNoDeoptId; | 1552 ? deopt_target->DeoptimizationTarget() |
| 1553 : Thread::kNoDeoptId; |
1587 ASSERT(Boxing::Supports(from)); | 1554 ASSERT(Boxing::Supports(from)); |
1588 ASSERT(Boxing::Supports(to)); | 1555 ASSERT(Boxing::Supports(to)); |
1589 Definition* boxed = BoxInstr::Create(from, use->CopyWithType()); | 1556 Definition* boxed = BoxInstr::Create(from, use->CopyWithType()); |
1590 use->BindTo(boxed); | 1557 use->BindTo(boxed); |
1591 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); | 1558 InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue); |
1592 converted = UnboxInstr::Create(to, new(Z) Value(boxed), deopt_id); | 1559 converted = UnboxInstr::Create(to, new (Z) Value(boxed), deopt_id); |
1593 } | 1560 } |
1594 ASSERT(converted != NULL); | 1561 ASSERT(converted != NULL); |
1595 InsertBefore(insert_before, converted, use->instruction()->env(), | 1562 InsertBefore(insert_before, converted, use->instruction()->env(), |
1596 FlowGraph::kValue); | 1563 FlowGraph::kValue); |
1597 if (is_environment_use) { | 1564 if (is_environment_use) { |
1598 use->BindToEnvironment(converted); | 1565 use->BindToEnvironment(converted); |
1599 } else { | 1566 } else { |
1600 use->BindTo(converted); | 1567 use->BindTo(converted); |
1601 } | 1568 } |
1602 | 1569 |
1603 if ((to == kUnboxedInt32) && (phi != NULL)) { | 1570 if ((to == kUnboxedInt32) && (phi != NULL)) { |
1604 // Int32 phis are unboxed optimistically. Ensure that unboxing | 1571 // Int32 phis are unboxed optimistically. Ensure that unboxing |
1605 // has deoptimization target attached from the goto instruction. | 1572 // has deoptimization target attached from the goto instruction. |
1606 CopyDeoptTarget(converted, insert_before); | 1573 CopyDeoptTarget(converted, insert_before); |
1607 } | 1574 } |
1608 } | 1575 } |
1609 | 1576 |
1610 | 1577 |
1611 void FlowGraph::ConvertEnvironmentUse(Value* use, Representation from_rep) { | 1578 void FlowGraph::ConvertEnvironmentUse(Value* use, Representation from_rep) { |
1612 const Representation to_rep = kTagged; | 1579 const Representation to_rep = kTagged; |
1613 if (from_rep == to_rep) { | 1580 if (from_rep == to_rep) { |
1614 return; | 1581 return; |
1615 } | 1582 } |
1616 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ true); | 1583 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/true); |
1617 } | 1584 } |
1618 | 1585 |
1619 | 1586 |
// Walks all uses of |def| and inserts representation conversions wherever a
// use requires a representation different from |def|'s.
void FlowGraph::InsertConversionsFor(Definition* def) {
  const Representation from_rep = def->representation();

  // NOTE(review): ConvertUse may rebind the current use, mutating the very
  // use list being walked; presumably Value::Iterator caches the next link to
  // tolerate this -- confirm before restructuring this loop.
  for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
    ConvertUse(it.Current(), from_rep);
  }

  // A successor count > 1 on the graph entry presumably means the graph has
  // catch-block entries -- TODO confirm. Only then do environment values need
  // to be observable (in tagged form) after a throw.
  if (graph_entry()->SuccessorCount() > 1) {
    for (Value::Iterator it(def->env_use_list()); !it.Done(); it.Advance()) {
      Value* use = it.Current();
      if (use->instruction()->MayThrow() &&
          use->instruction()->GetBlock()->InsideTryBlock()) {
        // Environment uses at calls inside try-blocks must be converted to
        // tagged representation.
        ConvertEnvironmentUse(it.Current(), from_rep);
      }
    }
  }
}
(...skipping 18 matching lines...) Expand all Loading... |
1661 unboxed = kUnboxedInt32x4; | 1624 unboxed = kUnboxedInt32x4; |
1662 } | 1625 } |
1663 break; | 1626 break; |
1664 case kFloat64x2Cid: | 1627 case kFloat64x2Cid: |
1665 if (ShouldInlineSimd()) { | 1628 if (ShouldInlineSimd()) { |
1666 unboxed = kUnboxedFloat64x2; | 1629 unboxed = kUnboxedFloat64x2; |
1667 } | 1630 } |
1668 break; | 1631 break; |
1669 } | 1632 } |
1670 | 1633 |
1671 if ((kSmiBits < 32) && | 1634 if ((kSmiBits < 32) && (unboxed == kTagged) && phi->Type()->IsInt() && |
1672 (unboxed == kTagged) && | |
1673 phi->Type()->IsInt() && | |
1674 RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt64)) { | 1635 RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt64)) { |
1675 // On 32-bit platforms conservatively unbox phis that: | 1636 // On 32-bit platforms conservatively unbox phis that: |
1676 // - are proven to be of type Int; | 1637 // - are proven to be of type Int; |
1677 // - fit into 64bits range; | 1638 // - fit into 64bits range; |
1678 // - have either constants or Box() operations as inputs; | 1639 // - have either constants or Box() operations as inputs; |
1679 // - have at least one Box() operation as an input; | 1640 // - have at least one Box() operation as an input; |
1680 // - are used in at least 1 Unbox() operation. | 1641 // - are used in at least 1 Unbox() operation. |
1681 bool should_unbox = false; | 1642 bool should_unbox = false; |
1682 for (intptr_t i = 0; i < phi->InputCount(); i++) { | 1643 for (intptr_t i = 0; i < phi->InputCount(); i++) { |
1683 Definition* input = phi->InputAt(i)->definition(); | 1644 Definition* input = phi->InputAt(i)->definition(); |
1684 if (input->IsBox() && | 1645 if (input->IsBox() && |
1685 RangeUtils::Fits(input->range(), | 1646 RangeUtils::Fits(input->range(), |
1686 RangeBoundary::kRangeBoundaryInt64)) { | 1647 RangeBoundary::kRangeBoundaryInt64)) { |
1687 should_unbox = true; | 1648 should_unbox = true; |
1688 } else if (!input->IsConstant()) { | 1649 } else if (!input->IsConstant()) { |
1689 should_unbox = false; | 1650 should_unbox = false; |
1690 break; | 1651 break; |
1691 } | 1652 } |
1692 } | 1653 } |
1693 | 1654 |
1694 if (should_unbox) { | 1655 if (should_unbox) { |
1695 // We checked inputs. Check if phi is used in at least one unbox | 1656 // We checked inputs. Check if phi is used in at least one unbox |
1696 // operation. | 1657 // operation. |
1697 bool has_unboxed_use = false; | 1658 bool has_unboxed_use = false; |
1698 for (Value* use = phi->input_use_list(); | 1659 for (Value* use = phi->input_use_list(); use != NULL; |
1699 use != NULL; | |
1700 use = use->next_use()) { | 1660 use = use->next_use()) { |
1701 Instruction* instr = use->instruction(); | 1661 Instruction* instr = use->instruction(); |
1702 if (instr->IsUnbox()) { | 1662 if (instr->IsUnbox()) { |
1703 has_unboxed_use = true; | 1663 has_unboxed_use = true; |
1704 break; | 1664 break; |
1705 } else if (IsUnboxedInteger( | 1665 } else if (IsUnboxedInteger( |
1706 instr->RequiredInputRepresentation(use->use_index()))) { | 1666 instr->RequiredInputRepresentation(use->use_index()))) { |
1707 has_unboxed_use = true; | 1667 has_unboxed_use = true; |
1708 break; | 1668 break; |
1709 } | 1669 } |
1710 } | 1670 } |
1711 | 1671 |
1712 if (!has_unboxed_use) { | 1672 if (!has_unboxed_use) { |
1713 should_unbox = false; | 1673 should_unbox = false; |
1714 } | 1674 } |
1715 } | 1675 } |
1716 | 1676 |
1717 if (should_unbox) { | 1677 if (should_unbox) { |
1718 unboxed = | 1678 unboxed = |
1719 RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32) | 1679 RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32) |
1720 ? kUnboxedInt32 : kUnboxedMint; | 1680 ? kUnboxedInt32 |
| 1681 : kUnboxedMint; |
1721 } | 1682 } |
1722 } | 1683 } |
1723 | 1684 |
1724 phi->set_representation(unboxed); | 1685 phi->set_representation(unboxed); |
1725 } | 1686 } |
1726 | 1687 |
1727 | 1688 |
1728 void FlowGraph::SelectRepresentations() { | 1689 void FlowGraph::SelectRepresentations() { |
1729 // Conservatively unbox all phis that were proven to be of Double, | 1690 // Conservatively unbox all phis that were proven to be of Double, |
1730 // Float32x4, or Int32x4 type. | 1691 // Float32x4, or Int32x4 type. |
1731 for (BlockIterator block_it = reverse_postorder_iterator(); | 1692 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
1732 !block_it.Done(); | |
1733 block_it.Advance()) { | 1693 block_it.Advance()) { |
1734 JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry(); | 1694 JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry(); |
1735 if (join_entry != NULL) { | 1695 if (join_entry != NULL) { |
1736 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 1696 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { |
1737 PhiInstr* phi = it.Current(); | 1697 PhiInstr* phi = it.Current(); |
1738 UnboxPhi(phi); | 1698 UnboxPhi(phi); |
1739 } | 1699 } |
1740 } | 1700 } |
1741 } | 1701 } |
1742 | 1702 |
1743 // Process all instructions and insert conversions where needed. | 1703 // Process all instructions and insert conversions where needed. |
1744 // Visit incoming parameters and constants. | 1704 // Visit incoming parameters and constants. |
1745 for (intptr_t i = 0; | 1705 for (intptr_t i = 0; i < graph_entry()->initial_definitions()->length(); |
1746 i < graph_entry()->initial_definitions()->length(); | |
1747 i++) { | 1706 i++) { |
1748 InsertConversionsFor((*graph_entry()->initial_definitions())[i]); | 1707 InsertConversionsFor((*graph_entry()->initial_definitions())[i]); |
1749 } | 1708 } |
1750 | 1709 |
1751 for (BlockIterator block_it = reverse_postorder_iterator(); | 1710 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
1752 !block_it.Done(); | |
1753 block_it.Advance()) { | 1711 block_it.Advance()) { |
1754 BlockEntryInstr* entry = block_it.Current(); | 1712 BlockEntryInstr* entry = block_it.Current(); |
1755 JoinEntryInstr* join_entry = entry->AsJoinEntry(); | 1713 JoinEntryInstr* join_entry = entry->AsJoinEntry(); |
1756 if (join_entry != NULL) { | 1714 if (join_entry != NULL) { |
1757 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { | 1715 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) { |
1758 PhiInstr* phi = it.Current(); | 1716 PhiInstr* phi = it.Current(); |
1759 ASSERT(phi != NULL); | 1717 ASSERT(phi != NULL); |
1760 ASSERT(phi->is_alive()); | 1718 ASSERT(phi->is_alive()); |
1761 InsertConversionsFor(phi); | 1719 InsertConversionsFor(phi); |
1762 } | 1720 } |
1763 } | 1721 } |
1764 CatchBlockEntryInstr* catch_entry = entry->AsCatchBlockEntry(); | 1722 CatchBlockEntryInstr* catch_entry = entry->AsCatchBlockEntry(); |
1765 if (catch_entry != NULL) { | 1723 if (catch_entry != NULL) { |
1766 for (intptr_t i = 0; | 1724 for (intptr_t i = 0; i < catch_entry->initial_definitions()->length(); |
1767 i < catch_entry->initial_definitions()->length(); | |
1768 i++) { | 1725 i++) { |
1769 InsertConversionsFor((*catch_entry->initial_definitions())[i]); | 1726 InsertConversionsFor((*catch_entry->initial_definitions())[i]); |
1770 } | 1727 } |
1771 } | 1728 } |
1772 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 1729 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
1773 Definition* def = it.Current()->AsDefinition(); | 1730 Definition* def = it.Current()->AsDefinition(); |
1774 if (def != NULL) { | 1731 if (def != NULL) { |
1775 InsertConversionsFor(def); | 1732 InsertConversionsFor(def); |
1776 } | 1733 } |
1777 } | 1734 } |
1778 } | 1735 } |
1779 } | 1736 } |
1780 | 1737 |
1781 | 1738 |
1782 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) | 1739 #if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32) |
1783 // Smi widening pass is only meaningful on platforms where Smi | 1740 // Smi widening pass is only meaningful on platforms where Smi |
1784 // is smaller than 32bit. For now only support it on ARM and ia32. | 1741 // is smaller than 32bit. For now only support it on ARM and ia32. |
1785 static bool CanBeWidened(BinarySmiOpInstr* smi_op) { | 1742 static bool CanBeWidened(BinarySmiOpInstr* smi_op) { |
1786 return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), | 1743 return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), smi_op->left(), |
1787 smi_op->left(), | |
1788 smi_op->right()); | 1744 smi_op->right()); |
1789 } | 1745 } |
1790 | 1746 |
1791 | 1747 |
1792 static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { | 1748 static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) { |
1793 // TODO(vegorov): when shifts with non-constants shift count are supported | 1749 // TODO(vegorov): when shifts with non-constants shift count are supported |
1794 // add them here as we save untagging for the count. | 1750 // add them here as we save untagging for the count. |
1795 switch (smi_op->op_kind()) { | 1751 switch (smi_op->op_kind()) { |
1796 case Token::kMUL: | 1752 case Token::kMUL: |
1797 case Token::kSHR: | 1753 case Token::kSHR: |
1798 // For kMUL we save untagging of the argument for kSHR | 1754 // For kMUL we save untagging of the argument for kSHR |
1799 // we save tagging of the result. | 1755 // we save tagging of the result. |
1800 return true; | 1756 return true; |
1801 | 1757 |
1802 default: | 1758 default: |
1803 return false; | 1759 return false; |
1804 } | 1760 } |
1805 } | 1761 } |
1806 | 1762 |
1807 | 1763 |
1808 void FlowGraph::WidenSmiToInt32() { | 1764 void FlowGraph::WidenSmiToInt32() { |
1809 GrowableArray<BinarySmiOpInstr*> candidates; | 1765 GrowableArray<BinarySmiOpInstr*> candidates; |
1810 | 1766 |
1811 // Step 1. Collect all instructions that potentially benefit from widening of | 1767 // Step 1. Collect all instructions that potentially benefit from widening of |
1812 // their operands (or their result) into int32 range. | 1768 // their operands (or their result) into int32 range. |
1813 for (BlockIterator block_it = reverse_postorder_iterator(); | 1769 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
1814 !block_it.Done(); | |
1815 block_it.Advance()) { | 1770 block_it.Advance()) { |
1816 for (ForwardInstructionIterator instr_it(block_it.Current()); | 1771 for (ForwardInstructionIterator instr_it(block_it.Current()); |
1817 !instr_it.Done(); | 1772 !instr_it.Done(); instr_it.Advance()) { |
1818 instr_it.Advance()) { | |
1819 BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); | 1773 BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp(); |
1820 if ((smi_op != NULL) && | 1774 if ((smi_op != NULL) && smi_op->HasSSATemp() && |
1821 smi_op->HasSSATemp() && | 1775 BenefitsFromWidening(smi_op) && CanBeWidened(smi_op)) { |
1822 BenefitsFromWidening(smi_op) && | |
1823 CanBeWidened(smi_op)) { | |
1824 candidates.Add(smi_op); | 1776 candidates.Add(smi_op); |
1825 } | 1777 } |
1826 } | 1778 } |
1827 } | 1779 } |
1828 | 1780 |
1829 if (candidates.is_empty()) { | 1781 if (candidates.is_empty()) { |
1830 return; | 1782 return; |
1831 } | 1783 } |
1832 | 1784 |
1833 // Step 2. For each block in the graph compute which loop it belongs to. | 1785 // Step 2. For each block in the graph compute which loop it belongs to. |
1834 // We will use this information later during computation of the widening's | 1786 // We will use this information later during computation of the widening's |
  // gain: we are going to assume that only conversion occurring inside the
1836 // same loop should be counted against the gain, all other conversions | 1788 // same loop should be counted against the gain, all other conversions |
1837 // can be hoisted and thus cost nothing compared to the loop cost itself. | 1789 // can be hoisted and thus cost nothing compared to the loop cost itself. |
1838 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = LoopHeaders(); | 1790 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = LoopHeaders(); |
1839 | 1791 |
1840 GrowableArray<intptr_t> loops(preorder().length()); | 1792 GrowableArray<intptr_t> loops(preorder().length()); |
1841 for (intptr_t i = 0; i < preorder().length(); i++) { | 1793 for (intptr_t i = 0; i < preorder().length(); i++) { |
1842 loops.Add(-1); | 1794 loops.Add(-1); |
1843 } | 1795 } |
1844 | 1796 |
1845 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { | 1797 for (intptr_t loop_id = 0; loop_id < loop_headers.length(); ++loop_id) { |
1846 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); | 1798 for (BitVector::Iterator loop_it(loop_headers[loop_id]->loop_info()); |
1847 !loop_it.Done(); | 1799 !loop_it.Done(); loop_it.Advance()) { |
1848 loop_it.Advance()) { | |
1849 loops[loop_it.Current()] = loop_id; | 1800 loops[loop_it.Current()] = loop_id; |
1850 } | 1801 } |
1851 } | 1802 } |
1852 | 1803 |
1853 // Step 3. For each candidate transitively collect all other BinarySmiOpInstr | 1804 // Step 3. For each candidate transitively collect all other BinarySmiOpInstr |
1854 // and PhiInstr that depend on it and that it depends on and count amount of | 1805 // and PhiInstr that depend on it and that it depends on and count amount of |
1855 // untagging operations that we save in assumption that this whole graph of | 1806 // untagging operations that we save in assumption that this whole graph of |
1856 // values is using kUnboxedInt32 representation instead of kTagged. | 1807 // values is using kUnboxedInt32 representation instead of kTagged. |
1857 // Convert those graphs that have positive gain to kUnboxedInt32. | 1808 // Convert those graphs that have positive gain to kUnboxedInt32. |
1858 | 1809 |
1859 // BitVector containing SSA indexes of all processed definitions. Used to skip | 1810 // BitVector containing SSA indexes of all processed definitions. Used to skip |
1860 // those candidates that belong to dependency graph of another candidate. | 1811 // those candidates that belong to dependency graph of another candidate. |
1861 BitVector* processed = | 1812 BitVector* processed = new (Z) BitVector(Z, current_ssa_temp_index()); |
1862 new(Z) BitVector(Z, current_ssa_temp_index()); | |
1863 | 1813 |
1864 // Worklist used to collect dependency graph. | 1814 // Worklist used to collect dependency graph. |
1865 DefinitionWorklist worklist(this, candidates.length()); | 1815 DefinitionWorklist worklist(this, candidates.length()); |
1866 for (intptr_t i = 0; i < candidates.length(); i++) { | 1816 for (intptr_t i = 0; i < candidates.length(); i++) { |
1867 BinarySmiOpInstr* op = candidates[i]; | 1817 BinarySmiOpInstr* op = candidates[i]; |
1868 if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { | 1818 if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) { |
1869 continue; | 1819 continue; |
1870 } | 1820 } |
1871 | 1821 |
1872 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1822 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
(...skipping 18 matching lines...) Expand all Loading... |
1891 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1841 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1892 THR_Print("^ [%" Pd "] (o) %s\n", gain, defn->ToCString()); | 1842 THR_Print("^ [%" Pd "] (o) %s\n", gain, defn->ToCString()); |
1893 } | 1843 } |
1894 } | 1844 } |
1895 | 1845 |
1896 const intptr_t defn_loop = loops[defn->GetBlock()->preorder_number()]; | 1846 const intptr_t defn_loop = loops[defn->GetBlock()->preorder_number()]; |
1897 | 1847 |
1898 // Process all inputs. | 1848 // Process all inputs. |
1899 for (intptr_t k = 0; k < defn->InputCount(); k++) { | 1849 for (intptr_t k = 0; k < defn->InputCount(); k++) { |
1900 Definition* input = defn->InputAt(k)->definition(); | 1850 Definition* input = defn->InputAt(k)->definition(); |
1901 if (input->IsBinarySmiOp() && | 1851 if (input->IsBinarySmiOp() && CanBeWidened(input->AsBinarySmiOp())) { |
1902 CanBeWidened(input->AsBinarySmiOp())) { | |
1903 worklist.Add(input); | 1852 worklist.Add(input); |
1904 } else if (input->IsPhi() && (input->Type()->ToCid() == kSmiCid)) { | 1853 } else if (input->IsPhi() && (input->Type()->ToCid() == kSmiCid)) { |
1905 worklist.Add(input); | 1854 worklist.Add(input); |
1906 } else if (input->IsBinaryMintOp()) { | 1855 } else if (input->IsBinaryMintOp()) { |
1907 // Mint operation produces untagged result. We avoid tagging. | 1856 // Mint operation produces untagged result. We avoid tagging. |
1908 gain++; | 1857 gain++; |
1909 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1858 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1910 THR_Print("^ [%" Pd "] (i) %s\n", gain, input->ToCString()); | 1859 THR_Print("^ [%" Pd "] (i) %s\n", gain, input->ToCString()); |
1911 } | 1860 } |
1912 } else if (defn_loop == loops[input->GetBlock()->preorder_number()] && | 1861 } else if (defn_loop == loops[input->GetBlock()->preorder_number()] && |
1913 (input->Type()->ToCid() == kSmiCid)) { | 1862 (input->Type()->ToCid() == kSmiCid)) { |
1914 // Input comes from the same loop, is known to be smi and requires | 1863 // Input comes from the same loop, is known to be smi and requires |
1915 // untagging. | 1864 // untagging. |
1916 // TODO(vegorov) this heuristic assumes that values that are not | 1865 // TODO(vegorov) this heuristic assumes that values that are not |
1917 // known to be smi have to be checked and this check can be | 1866 // known to be smi have to be checked and this check can be |
1918 // coalesced with untagging. Start coalescing them. | 1867 // coalesced with untagging. Start coalescing them. |
1919 gain--; | 1868 gain--; |
1920 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1869 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1921 THR_Print("v [%" Pd "] (i) %s\n", gain, input->ToCString()); | 1870 THR_Print("v [%" Pd "] (i) %s\n", gain, input->ToCString()); |
1922 } | 1871 } |
1923 } | 1872 } |
1924 } | 1873 } |
1925 | 1874 |
1926 // Process all uses. | 1875 // Process all uses. |
1927 for (Value* use = defn->input_use_list(); | 1876 for (Value* use = defn->input_use_list(); use != NULL; |
1928 use != NULL; | |
1929 use = use->next_use()) { | 1877 use = use->next_use()) { |
1930 Instruction* instr = use->instruction(); | 1878 Instruction* instr = use->instruction(); |
1931 Definition* use_defn = instr->AsDefinition(); | 1879 Definition* use_defn = instr->AsDefinition(); |
1932 if (use_defn == NULL) { | 1880 if (use_defn == NULL) { |
1933 // We assume that tagging before returning or pushing argument costs | 1881 // We assume that tagging before returning or pushing argument costs |
1934 // very little compared to the cost of the return/call itself. | 1882 // very little compared to the cost of the return/call itself. |
1935 if (!instr->IsReturn() && !instr->IsPushArgument()) { | 1883 if (!instr->IsReturn() && !instr->IsPushArgument()) { |
1936 gain--; | 1884 gain--; |
1937 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1885 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1938 THR_Print("v [%" Pd "] (u) %s\n", | 1886 THR_Print("v [%" Pd "] (u) %s\n", gain, |
1939 gain, | |
1940 use->instruction()->ToCString()); | 1887 use->instruction()->ToCString()); |
1941 } | 1888 } |
1942 } | 1889 } |
1943 continue; | 1890 continue; |
1944 } else if (use_defn->IsBinarySmiOp() && | 1891 } else if (use_defn->IsBinarySmiOp() && |
1945 CanBeWidened(use_defn->AsBinarySmiOp())) { | 1892 CanBeWidened(use_defn->AsBinarySmiOp())) { |
1946 worklist.Add(use_defn); | 1893 worklist.Add(use_defn); |
1947 } else if (use_defn->IsPhi() && | 1894 } else if (use_defn->IsPhi() && |
1948 use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { | 1895 use_defn->AsPhi()->Type()->ToCid() == kSmiCid) { |
1949 worklist.Add(use_defn); | 1896 worklist.Add(use_defn); |
1950 } else if (use_defn->IsBinaryMintOp()) { | 1897 } else if (use_defn->IsBinaryMintOp()) { |
1951 // BinaryMintOp requires untagging of its inputs. | 1898 // BinaryMintOp requires untagging of its inputs. |
1952 // Converting kUnboxedInt32 to kUnboxedMint is essentially zero cost | 1899 // Converting kUnboxedInt32 to kUnboxedMint is essentially zero cost |
1953 // sign extension operation. | 1900 // sign extension operation. |
1954 gain++; | 1901 gain++; |
1955 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1902 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1956 THR_Print("^ [%" Pd "] (u) %s\n", | 1903 THR_Print("^ [%" Pd "] (u) %s\n", gain, |
1957 gain, | |
1958 use->instruction()->ToCString()); | 1904 use->instruction()->ToCString()); |
1959 } | 1905 } |
1960 } else if (defn_loop == loops[instr->GetBlock()->preorder_number()]) { | 1906 } else if (defn_loop == loops[instr->GetBlock()->preorder_number()]) { |
1961 gain--; | 1907 gain--; |
1962 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1908 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1963 THR_Print("v [%" Pd "] (u) %s\n", | 1909 THR_Print("v [%" Pd "] (u) %s\n", gain, |
1964 gain, | |
1965 use->instruction()->ToCString()); | 1910 use->instruction()->ToCString()); |
1966 } | 1911 } |
1967 } | 1912 } |
1968 } | 1913 } |
1969 } | 1914 } |
1970 | 1915 |
1971 processed->AddAll(worklist.contains_vector()); | 1916 processed->AddAll(worklist.contains_vector()); |
1972 | 1917 |
1973 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { | 1918 if (FLAG_support_il_printer && FLAG_trace_smi_widening) { |
1974 THR_Print("~ %s gain %" Pd "\n", op->ToCString(), gain); | 1919 THR_Print("~ %s gain %" Pd "\n", op->ToCString(), gain); |
1975 } | 1920 } |
1976 | 1921 |
1977 if (gain > 0) { | 1922 if (gain > 0) { |
1978 // We have positive gain from widening. Convert all BinarySmiOpInstr into | 1923 // We have positive gain from widening. Convert all BinarySmiOpInstr into |
1979 // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. | 1924 // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32. |
1980 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { | 1925 for (intptr_t j = 0; j < worklist.definitions().length(); j++) { |
1981 Definition* defn = worklist.definitions()[j]; | 1926 Definition* defn = worklist.definitions()[j]; |
1982 ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); | 1927 ASSERT(defn->IsPhi() || defn->IsBinarySmiOp()); |
1983 | 1928 |
1984 if (defn->IsBinarySmiOp()) { | 1929 if (defn->IsBinarySmiOp()) { |
1985 BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); | 1930 BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp(); |
1986 BinaryInt32OpInstr* int32_op = new(Z) BinaryInt32OpInstr( | 1931 BinaryInt32OpInstr* int32_op = new (Z) BinaryInt32OpInstr( |
1987 smi_op->op_kind(), | 1932 smi_op->op_kind(), smi_op->left()->CopyWithType(), |
1988 smi_op->left()->CopyWithType(), | 1933 smi_op->right()->CopyWithType(), smi_op->DeoptimizationTarget()); |
1989 smi_op->right()->CopyWithType(), | |
1990 smi_op->DeoptimizationTarget()); | |
1991 | 1934 |
1992 smi_op->ReplaceWith(int32_op, NULL); | 1935 smi_op->ReplaceWith(int32_op, NULL); |
1993 } else if (defn->IsPhi()) { | 1936 } else if (defn->IsPhi()) { |
1994 defn->AsPhi()->set_representation(kUnboxedInt32); | 1937 defn->AsPhi()->set_representation(kUnboxedInt32); |
1995 ASSERT(defn->Type()->IsInt()); | 1938 ASSERT(defn->Type()->IsInt()); |
1996 } | 1939 } |
1997 } | 1940 } |
1998 } | 1941 } |
1999 } | 1942 } |
2000 } | 1943 } |
2001 #else | 1944 #else |
2002 void FlowGraph::WidenSmiToInt32() { | 1945 void FlowGraph::WidenSmiToInt32() { |
2003 // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi | 1946 // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi |
2004 // operations to 32-bit where it saves tagging and untagging and allows | 1947 // operations to 32-bit where it saves tagging and untagging and allows |
2005 // to use shorted (and faster) instructions. But we currently don't | 1948 // to use shorted (and faster) instructions. But we currently don't |
2006 // save enough range information in the ICData to drive this decision. | 1949 // save enough range information in the ICData to drive this decision. |
2007 } | 1950 } |
2008 #endif | 1951 #endif |
2009 | 1952 |
2010 | 1953 |
2011 void FlowGraph::EliminateEnvironments() { | 1954 void FlowGraph::EliminateEnvironments() { |
2012 // After this pass we can no longer perform LICM and hoist instructions | 1955 // After this pass we can no longer perform LICM and hoist instructions |
2013 // that can deoptimize. | 1956 // that can deoptimize. |
2014 | 1957 |
2015 disallow_licm(); | 1958 disallow_licm(); |
2016 for (BlockIterator block_it = reverse_postorder_iterator(); | 1959 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
2017 !block_it.Done(); | |
2018 block_it.Advance()) { | 1960 block_it.Advance()) { |
2019 BlockEntryInstr* block = block_it.Current(); | 1961 BlockEntryInstr* block = block_it.Current(); |
2020 if (!block->IsCatchBlockEntry()) { | 1962 if (!block->IsCatchBlockEntry()) { |
2021 block->RemoveEnvironment(); | 1963 block->RemoveEnvironment(); |
2022 } | 1964 } |
2023 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { | 1965 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |
2024 Instruction* current = it.Current(); | 1966 Instruction* current = it.Current(); |
2025 if (!current->CanDeoptimize() && | 1967 if (!current->CanDeoptimize() && |
2026 (!current->MayThrow() || !current->GetBlock()->InsideTryBlock())) { | 1968 (!current->MayThrow() || !current->GetBlock()->InsideTryBlock())) { |
2027 // Instructions that can throw need an environment for optimized | 1969 // Instructions that can throw need an environment for optimized |
2028 // try-catch. | 1970 // try-catch. |
2029 // TODO(srdjan): --source-lines needs deopt environments to get at | 1971 // TODO(srdjan): --source-lines needs deopt environments to get at |
2030 // the code for this instruction, however, leaving the environment | 1972 // the code for this instruction, however, leaving the environment |
2031 // changes code. | 1973 // changes code. |
2032 current->RemoveEnvironment(); | 1974 current->RemoveEnvironment(); |
2033 } | 1975 } |
2034 } | 1976 } |
2035 } | 1977 } |
2036 } | 1978 } |
2037 | 1979 |
2038 | 1980 |
2039 bool FlowGraph::Canonicalize() { | 1981 bool FlowGraph::Canonicalize() { |
2040 bool changed = false; | 1982 bool changed = false; |
2041 | 1983 |
2042 for (BlockIterator block_it = reverse_postorder_iterator(); | 1984 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
2043 !block_it.Done(); | |
2044 block_it.Advance()) { | 1985 block_it.Advance()) { |
2045 for (ForwardInstructionIterator it(block_it.Current()); | 1986 for (ForwardInstructionIterator it(block_it.Current()); !it.Done(); |
2046 !it.Done(); | |
2047 it.Advance()) { | 1987 it.Advance()) { |
2048 Instruction* current = it.Current(); | 1988 Instruction* current = it.Current(); |
2049 if (current->HasUnmatchedInputRepresentations()) { | 1989 if (current->HasUnmatchedInputRepresentations()) { |
2050 // Can't canonicalize this instruction until all conversions for its | 1990 // Can't canonicalize this instruction until all conversions for its |
2051 // inputs are inserted. | 1991 // inputs are inserted. |
2052 continue; | 1992 continue; |
2053 } | 1993 } |
2054 | 1994 |
2055 Instruction* replacement = current->Canonicalize(this); | 1995 Instruction* replacement = current->Canonicalize(this); |
2056 | 1996 |
(...skipping 10 matching lines...) Expand all Loading... |
2067 } | 2007 } |
2068 | 2008 |
2069 | 2009 |
2070 // Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the | 2010 // Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the |
2071 // shift can be a truncating Smi shift-left and result is always Smi. | 2011 // shift can be a truncating Smi shift-left and result is always Smi. |
2072 // Merging occurs only per basic-block. | 2012 // Merging occurs only per basic-block. |
2073 void FlowGraph::TryOptimizePatterns() { | 2013 void FlowGraph::TryOptimizePatterns() { |
2074 if (!FLAG_truncating_left_shift) return; | 2014 if (!FLAG_truncating_left_shift) return; |
2075 GrowableArray<BinarySmiOpInstr*> div_mod_merge; | 2015 GrowableArray<BinarySmiOpInstr*> div_mod_merge; |
2076 GrowableArray<InvokeMathCFunctionInstr*> sin_cos_merge; | 2016 GrowableArray<InvokeMathCFunctionInstr*> sin_cos_merge; |
2077 for (BlockIterator block_it = reverse_postorder_iterator(); | 2017 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done(); |
2078 !block_it.Done(); | |
2079 block_it.Advance()) { | 2018 block_it.Advance()) { |
2080 // Merging only per basic-block. | 2019 // Merging only per basic-block. |
2081 div_mod_merge.Clear(); | 2020 div_mod_merge.Clear(); |
2082 sin_cos_merge.Clear(); | 2021 sin_cos_merge.Clear(); |
2083 ForwardInstructionIterator it(block_it.Current()); | 2022 ForwardInstructionIterator it(block_it.Current()); |
2084 for (; !it.Done(); it.Advance()) { | 2023 for (; !it.Done(); it.Advance()) { |
2085 if (it.Current()->IsBinarySmiOp()) { | 2024 if (it.Current()->IsBinarySmiOp()) { |
2086 BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp(); | 2025 BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp(); |
2087 if (binop->op_kind() == Token::kBIT_AND) { | 2026 if (binop->op_kind() == Token::kBIT_AND) { |
2088 OptimizeLeftShiftBitAndSmiOp(&it, | 2027 OptimizeLeftShiftBitAndSmiOp(&it, binop, binop->left()->definition(), |
2089 binop, | |
2090 binop->left()->definition(), | |
2091 binop->right()->definition()); | 2028 binop->right()->definition()); |
2092 } else if ((binop->op_kind() == Token::kTRUNCDIV) || | 2029 } else if ((binop->op_kind() == Token::kTRUNCDIV) || |
2093 (binop->op_kind() == Token::kMOD)) { | 2030 (binop->op_kind() == Token::kMOD)) { |
2094 if (binop->HasUses()) { | 2031 if (binop->HasUses()) { |
2095 div_mod_merge.Add(binop); | 2032 div_mod_merge.Add(binop); |
2096 } | 2033 } |
2097 } | 2034 } |
2098 } else if (it.Current()->IsBinaryMintOp()) { | 2035 } else if (it.Current()->IsBinaryMintOp()) { |
2099 BinaryMintOpInstr* mintop = it.Current()->AsBinaryMintOp(); | 2036 BinaryMintOpInstr* mintop = it.Current()->AsBinaryMintOp(); |
2100 if (mintop->op_kind() == Token::kBIT_AND) { | 2037 if (mintop->op_kind() == Token::kBIT_AND) { |
2101 OptimizeLeftShiftBitAndSmiOp(&it, | 2038 OptimizeLeftShiftBitAndSmiOp(&it, mintop, |
2102 mintop, | |
2103 mintop->left()->definition(), | 2039 mintop->left()->definition(), |
2104 mintop->right()->definition()); | 2040 mintop->right()->definition()); |
2105 } | 2041 } |
2106 } else if (it.Current()->IsInvokeMathCFunction()) { | 2042 } else if (it.Current()->IsInvokeMathCFunction()) { |
2107 InvokeMathCFunctionInstr* math_unary = | 2043 InvokeMathCFunctionInstr* math_unary = |
2108 it.Current()->AsInvokeMathCFunction(); | 2044 it.Current()->AsInvokeMathCFunction(); |
2109 if ((math_unary->recognized_kind() == MethodRecognizer::kMathSin) || | 2045 if ((math_unary->recognized_kind() == MethodRecognizer::kMathSin) || |
2110 (math_unary->recognized_kind() == MethodRecognizer::kMathCos)) { | 2046 (math_unary->recognized_kind() == MethodRecognizer::kMathCos)) { |
2111 if (math_unary->HasUses()) { | 2047 if (math_unary->HasUses()) { |
2112 sin_cos_merge.Add(math_unary); | 2048 sin_cos_merge.Add(math_unary); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2160 if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) { | 2096 if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) { |
2161 smi_shift_left = AsSmiShiftLeftInstruction(right_instr); | 2097 smi_shift_left = AsSmiShiftLeftInstruction(right_instr); |
2162 } | 2098 } |
2163 if (smi_shift_left == NULL) return; | 2099 if (smi_shift_left == NULL) return; |
2164 | 2100 |
2165 // Pattern recognized. | 2101 // Pattern recognized. |
2166 smi_shift_left->mark_truncating(); | 2102 smi_shift_left->mark_truncating(); |
2167 ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryMintOp()); | 2103 ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryMintOp()); |
2168 if (bit_and_instr->IsBinaryMintOp()) { | 2104 if (bit_and_instr->IsBinaryMintOp()) { |
2169 // Replace Mint op with Smi op. | 2105 // Replace Mint op with Smi op. |
2170 BinarySmiOpInstr* smi_op = new(Z) BinarySmiOpInstr( | 2106 BinarySmiOpInstr* smi_op = new (Z) BinarySmiOpInstr( |
2171 Token::kBIT_AND, | 2107 Token::kBIT_AND, new (Z) Value(left_instr), new (Z) Value(right_instr), |
2172 new(Z) Value(left_instr), | |
2173 new(Z) Value(right_instr), | |
2174 Thread::kNoDeoptId); // BIT_AND cannot deoptimize. | 2108 Thread::kNoDeoptId); // BIT_AND cannot deoptimize. |
2175 bit_and_instr->ReplaceWith(smi_op, current_iterator); | 2109 bit_and_instr->ReplaceWith(smi_op, current_iterator); |
2176 } | 2110 } |
2177 } | 2111 } |
2178 | 2112 |
2179 | 2113 |
2180 // Dart: | 2114 // Dart: |
2181 // var x = d % 10; | 2115 // var x = d % 10; |
2182 // var y = d ~/ 10; | 2116 // var y = d ~/ 10; |
2183 // var z = x + y; | 2117 // var z = x + y; |
(...skipping 18 matching lines...) Expand all Loading... |
2202 } | 2136 } |
2203 for (intptr_t i = 0; i < merge_candidates->length(); i++) { | 2137 for (intptr_t i = 0; i < merge_candidates->length(); i++) { |
2204 BinarySmiOpInstr* curr_instr = (*merge_candidates)[i]; | 2138 BinarySmiOpInstr* curr_instr = (*merge_candidates)[i]; |
2205 if (curr_instr == NULL) { | 2139 if (curr_instr == NULL) { |
2206 // Instruction was merged already. | 2140 // Instruction was merged already. |
2207 continue; | 2141 continue; |
2208 } | 2142 } |
2209 ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) || | 2143 ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) || |
2210 (curr_instr->op_kind() == Token::kMOD)); | 2144 (curr_instr->op_kind() == Token::kMOD)); |
2211 // Check if there is kMOD/kTRUNDIV binop with same inputs. | 2145 // Check if there is kMOD/kTRUNDIV binop with same inputs. |
2212 const Token::Kind other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV) ? | 2146 const Token::Kind other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV) |
2213 Token::kMOD : Token::kTRUNCDIV; | 2147 ? Token::kMOD |
| 2148 : Token::kTRUNCDIV; |
2214 Definition* left_def = curr_instr->left()->definition(); | 2149 Definition* left_def = curr_instr->left()->definition(); |
2215 Definition* right_def = curr_instr->right()->definition(); | 2150 Definition* right_def = curr_instr->right()->definition(); |
2216 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { | 2151 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { |
2217 BinarySmiOpInstr* other_binop = (*merge_candidates)[k]; | 2152 BinarySmiOpInstr* other_binop = (*merge_candidates)[k]; |
2218 // 'other_binop' can be NULL if it was already merged. | 2153 // 'other_binop' can be NULL if it was already merged. |
2219 if ((other_binop != NULL) && | 2154 if ((other_binop != NULL) && (other_binop->op_kind() == other_kind) && |
2220 (other_binop->op_kind() == other_kind) && | |
2221 (other_binop->left()->definition() == left_def) && | 2155 (other_binop->left()->definition() == left_def) && |
2222 (other_binop->right()->definition() == right_def)) { | 2156 (other_binop->right()->definition() == right_def)) { |
2223 (*merge_candidates)[k] = NULL; // Clear it. | 2157 (*merge_candidates)[k] = NULL; // Clear it. |
2224 ASSERT(curr_instr->HasUses()); | 2158 ASSERT(curr_instr->HasUses()); |
2225 AppendExtractNthOutputForMerged( | 2159 AppendExtractNthOutputForMerged( |
2226 curr_instr, | 2160 curr_instr, MergedMathInstr::OutputIndexOf(curr_instr->op_kind()), |
2227 MergedMathInstr::OutputIndexOf(curr_instr->op_kind()), | |
2228 kTagged, kSmiCid); | 2161 kTagged, kSmiCid); |
2229 ASSERT(other_binop->HasUses()); | 2162 ASSERT(other_binop->HasUses()); |
2230 AppendExtractNthOutputForMerged( | 2163 AppendExtractNthOutputForMerged( |
2231 other_binop, | 2164 other_binop, MergedMathInstr::OutputIndexOf(other_binop->op_kind()), |
2232 MergedMathInstr::OutputIndexOf(other_binop->op_kind()), | |
2233 kTagged, kSmiCid); | 2165 kTagged, kSmiCid); |
2234 | 2166 |
2235 ZoneGrowableArray<Value*>* args = new(Z) ZoneGrowableArray<Value*>(2); | 2167 ZoneGrowableArray<Value*>* args = new (Z) ZoneGrowableArray<Value*>(2); |
2236 args->Add(new(Z) Value(curr_instr->left()->definition())); | 2168 args->Add(new (Z) Value(curr_instr->left()->definition())); |
2237 args->Add(new(Z) Value(curr_instr->right()->definition())); | 2169 args->Add(new (Z) Value(curr_instr->right()->definition())); |
2238 | 2170 |
2239 // Replace with TruncDivMod. | 2171 // Replace with TruncDivMod. |
2240 MergedMathInstr* div_mod = new(Z) MergedMathInstr( | 2172 MergedMathInstr* div_mod = new (Z) MergedMathInstr( |
2241 args, | 2173 args, curr_instr->deopt_id(), MergedMathInstr::kTruncDivMod); |
2242 curr_instr->deopt_id(), | |
2243 MergedMathInstr::kTruncDivMod); | |
2244 curr_instr->ReplaceWith(div_mod, NULL); | 2174 curr_instr->ReplaceWith(div_mod, NULL); |
2245 other_binop->ReplaceUsesWith(div_mod); | 2175 other_binop->ReplaceUsesWith(div_mod); |
2246 other_binop->RemoveFromGraph(); | 2176 other_binop->RemoveFromGraph(); |
2247 // Only one merge possible. Because canonicalization happens later, | 2177 // Only one merge possible. Because canonicalization happens later, |
2248 // more candidates are possible. | 2178 // more candidates are possible. |
2249 // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod. | 2179 // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod. |
2250 break; | 2180 break; |
2251 } | 2181 } |
2252 } | 2182 } |
2253 } | 2183 } |
(...skipping 15 matching lines...) Expand all Loading... |
2269 InvokeMathCFunctionInstr* curr_instr = (*merge_candidates)[i]; | 2199 InvokeMathCFunctionInstr* curr_instr = (*merge_candidates)[i]; |
2270 if (curr_instr == NULL) { | 2200 if (curr_instr == NULL) { |
2271 // Instruction was merged already. | 2201 // Instruction was merged already. |
2272 continue; | 2202 continue; |
2273 } | 2203 } |
2274 const MethodRecognizer::Kind kind = curr_instr->recognized_kind(); | 2204 const MethodRecognizer::Kind kind = curr_instr->recognized_kind(); |
2275 ASSERT((kind == MethodRecognizer::kMathSin) || | 2205 ASSERT((kind == MethodRecognizer::kMathSin) || |
2276 (kind == MethodRecognizer::kMathCos)); | 2206 (kind == MethodRecognizer::kMathCos)); |
2277 // Check if there is sin/cos binop with same inputs. | 2207 // Check if there is sin/cos binop with same inputs. |
2278 const MethodRecognizer::Kind other_kind = | 2208 const MethodRecognizer::Kind other_kind = |
2279 (kind == MethodRecognizer::kMathSin) | 2209 (kind == MethodRecognizer::kMathSin) ? MethodRecognizer::kMathCos |
2280 ? MethodRecognizer::kMathCos : MethodRecognizer::kMathSin; | 2210 : MethodRecognizer::kMathSin; |
2281 Definition* def = curr_instr->InputAt(0)->definition(); | 2211 Definition* def = curr_instr->InputAt(0)->definition(); |
2282 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { | 2212 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) { |
2283 InvokeMathCFunctionInstr* other_op = (*merge_candidates)[k]; | 2213 InvokeMathCFunctionInstr* other_op = (*merge_candidates)[k]; |
2284 // 'other_op' can be NULL if it was already merged. | 2214 // 'other_op' can be NULL if it was already merged. |
2285 if ((other_op != NULL) && (other_op->recognized_kind() == other_kind) && | 2215 if ((other_op != NULL) && (other_op->recognized_kind() == other_kind) && |
2286 (other_op->InputAt(0)->definition() == def)) { | 2216 (other_op->InputAt(0)->definition() == def)) { |
2287 (*merge_candidates)[k] = NULL; // Clear it. | 2217 (*merge_candidates)[k] = NULL; // Clear it. |
2288 ASSERT(curr_instr->HasUses()); | 2218 ASSERT(curr_instr->HasUses()); |
2289 AppendExtractNthOutputForMerged(curr_instr, | 2219 AppendExtractNthOutputForMerged(curr_instr, |
2290 MergedMathInstr::OutputIndexOf(kind), | 2220 MergedMathInstr::OutputIndexOf(kind), |
2291 kUnboxedDouble, kDoubleCid); | 2221 kUnboxedDouble, kDoubleCid); |
2292 ASSERT(other_op->HasUses()); | 2222 ASSERT(other_op->HasUses()); |
2293 AppendExtractNthOutputForMerged( | 2223 AppendExtractNthOutputForMerged( |
2294 other_op, | 2224 other_op, MergedMathInstr::OutputIndexOf(other_kind), |
2295 MergedMathInstr::OutputIndexOf(other_kind), | |
2296 kUnboxedDouble, kDoubleCid); | 2225 kUnboxedDouble, kDoubleCid); |
2297 ZoneGrowableArray<Value*>* args = new(Z) ZoneGrowableArray<Value*>(1); | 2226 ZoneGrowableArray<Value*>* args = new (Z) ZoneGrowableArray<Value*>(1); |
2298 args->Add(new(Z) Value(curr_instr->InputAt(0)->definition())); | 2227 args->Add(new (Z) Value(curr_instr->InputAt(0)->definition())); |
2299 // Replace with SinCos. | 2228 // Replace with SinCos. |
2300 MergedMathInstr* sin_cos = | 2229 MergedMathInstr* sin_cos = new (Z) MergedMathInstr( |
2301 new(Z) MergedMathInstr(args, | 2230 args, curr_instr->DeoptimizationTarget(), MergedMathInstr::kSinCos); |
2302 curr_instr->DeoptimizationTarget(), | |
2303 MergedMathInstr::kSinCos); | |
2304 curr_instr->ReplaceWith(sin_cos, NULL); | 2231 curr_instr->ReplaceWith(sin_cos, NULL); |
2305 other_op->ReplaceUsesWith(sin_cos); | 2232 other_op->ReplaceUsesWith(sin_cos); |
2306 other_op->RemoveFromGraph(); | 2233 other_op->RemoveFromGraph(); |
2307 // Only one merge possible. Because canonicalization happens later, | 2234 // Only one merge possible. Because canonicalization happens later, |
2308 // more candidates are possible. | 2235 // more candidates are possible. |
2309 // TODO(srdjan): Allow merging of sin/cos into sincos. | 2236 // TODO(srdjan): Allow merging of sin/cos into sincos. |
2310 break; | 2237 break; |
2311 } | 2238 } |
2312 } | 2239 } |
2313 } | 2240 } |
2314 } | 2241 } |
2315 | 2242 |
2316 | 2243 |
2317 void FlowGraph::AppendExtractNthOutputForMerged(Definition* instr, | 2244 void FlowGraph::AppendExtractNthOutputForMerged(Definition* instr, |
2318 intptr_t index, | 2245 intptr_t index, |
2319 Representation rep, | 2246 Representation rep, |
2320 intptr_t cid) { | 2247 intptr_t cid) { |
2321 ExtractNthOutputInstr* extract = | 2248 ExtractNthOutputInstr* extract = |
2322 new(Z) ExtractNthOutputInstr(new(Z) Value(instr), index, rep, cid); | 2249 new (Z) ExtractNthOutputInstr(new (Z) Value(instr), index, rep, cid); |
2323 instr->ReplaceUsesWith(extract); | 2250 instr->ReplaceUsesWith(extract); |
2324 InsertAfter(instr, extract, NULL, FlowGraph::kValue); | 2251 InsertAfter(instr, extract, NULL, FlowGraph::kValue); |
2325 } | 2252 } |
2326 | 2253 |
2327 | 2254 |
2328 | |
2329 | |
2330 } // namespace dart | 2255 } // namespace dart |
OLD | NEW |