OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/move-optimizer.h" | 5 #include "src/compiler/move-optimizer.h" |
6 | 6 |
7 namespace v8 { | 7 namespace v8 { |
8 namespace internal { | 8 namespace internal { |
9 namespace compiler { | 9 namespace compiler { |
10 | 10 |
(...skipping 17 matching lines...) |
28 bool operator()(const InstructionOperand& a, | 28 bool operator()(const InstructionOperand& a, |
29 const InstructionOperand& b) const { | 29 const InstructionOperand& b) const { |
30 return a.CompareCanonicalized(b); | 30 return a.CompareCanonicalized(b); |
31 } | 31 } |
32 }; | 32 }; |
33 | 33 |
34 typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap; | 34 typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap; |
35 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet; | 35 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet; |
36 | 36 |
37 | 37 |
38 bool GapsCanMoveOver(Instruction* instr, Zone* zone) { | |
39 if (instr->IsNop()) return true; | |
40 if (instr->ClobbersTemps() || instr->ClobbersRegisters() || | |
41 instr->ClobbersDoubleRegisters()) { | |
42 return false; | |
43 } | |
44 if (instr->arch_opcode() != ArchOpcode::kArchNop) return false; | |
45 | |
46 ZoneSet<InstructionOperand, OperandCompare> operands(zone); | |
47 for (size_t i = 0; i < instr->InputCount(); ++i) { | |
48 operands.insert(*instr->InputAt(i)); | |
49 } | |
50 for (size_t i = 0; i < instr->OutputCount(); ++i) { | |
51 operands.insert(*instr->OutputAt(i)); | |
52 } | |
53 for (size_t i = 0; i < instr->TempCount(); ++i) { | |
54 operands.insert(*instr->TempAt(i)); | |
55 } | |
56 for (int i = Instruction::GapPosition::FIRST_GAP_POSITION; | |
57 i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) { | |
58 ParallelMove* moves = instr->parallel_moves()[i]; | |
59 if (moves == nullptr) continue; | |
60 for (MoveOperands* move : *moves) { | |
61 if (operands.count(move->source()) > 0 || | |
62 operands.count(move->destination()) > 0) { | |
63 return false; | |
64 } | |
65 } | |
66 } | |
67 return true; | |
68 } | |
69 | |
70 | |
71 int FindFirstNonEmptySlot(const Instruction* instr) { | 38 int FindFirstNonEmptySlot(const Instruction* instr) { |
72 int i = Instruction::FIRST_GAP_POSITION; | 39 int i = Instruction::FIRST_GAP_POSITION; |
73 for (; i <= Instruction::LAST_GAP_POSITION; i++) { | 40 for (; i <= Instruction::LAST_GAP_POSITION; i++) { |
74 ParallelMove* moves = instr->parallel_moves()[i]; | 41 ParallelMove* moves = instr->parallel_moves()[i]; |
75 if (moves == nullptr) continue; | 42 if (moves == nullptr) continue; |
76 for (MoveOperands* move : *moves) { | 43 for (MoveOperands* move : *moves) { |
77 if (!move->IsRedundant()) return i; | 44 if (!move->IsRedundant()) return i; |
78 move->Eliminate(); | 45 move->Eliminate(); |
79 } | 46 } |
80 moves->clear(); // Clear this redundant move. | 47 moves->clear(); // Clear this redundant move. |
81 } | 48 } |
82 return i; | 49 return i; |
83 } | 50 } |
84 | 51 |
85 } // namespace | 52 } // namespace |
86 | 53 |
87 | 54 |
88 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code) | 55 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code) |
89 : local_zone_(local_zone), | 56 : local_zone_(local_zone), |
90 code_(code), | 57 code_(code), |
91 to_finalize_(local_zone), | |
92 local_vector_(local_zone) {} | 58 local_vector_(local_zone) {} |
93 | 59 |
94 | 60 |
95 void MoveOptimizer::Run() { | 61 void MoveOptimizer::Run() { |
| 62 for (Instruction* instruction : code()->instructions()) { |
| 63 CompressGaps(instruction); |
| 64 } |
96 for (InstructionBlock* block : code()->instruction_blocks()) { | 65 for (InstructionBlock* block : code()->instruction_blocks()) { |
97 CompressBlock(block); | 66 CompressBlock(block); |
98 } | 67 } |
99 for (InstructionBlock* block : code()->instruction_blocks()) { | 68 for (InstructionBlock* block : code()->instruction_blocks()) { |
100 if (block->PredecessorCount() <= 1) continue; | 69 if (block->PredecessorCount() <= 1) continue; |
101 if (!block->IsDeferred()) { | 70 if (!block->IsDeferred()) { |
102 bool has_only_deferred = true; | 71 bool has_only_deferred = true; |
103 for (RpoNumber& pred_id : block->predecessors()) { | 72 for (RpoNumber& pred_id : block->predecessors()) { |
104 if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) { | 73 if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) { |
105 has_only_deferred = false; | 74 has_only_deferred = false; |
106 break; | 75 break; |
107 } | 76 } |
108 } | 77 } |
109 // This would pull down common moves. If the moves occur in deferred | 78 // This would pull down common moves. If the moves occur in deferred |
110 // blocks, and the closest common successor is not deferred, we lose the | 79 // blocks, and the closest common successor is not deferred, we lose the |
111 // optimization of just spilling/filling in deferred blocks, when the | 80 // optimization of just spilling/filling in deferred blocks, when the |
112 // current block is not deferred. | 81 // current block is not deferred. |
113 if (has_only_deferred) continue; | 82 if (has_only_deferred) continue; |
114 } | 83 } |
115 OptimizeMerge(block); | 84 OptimizeMerge(block); |
116 } | 85 } |
117 for (Instruction* gap : to_finalize_) { | 86 for (Instruction* gap : code()->instructions()) { |
118 FinalizeMoves(gap); | 87 FinalizeMoves(gap); |
119 } | 88 } |
120 } | 89 } |
121 | 90 |
| 91 void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) { |
| 92 if (instruction->IsCall()) return; |
| 93 ParallelMove* moves = instruction->parallel_moves()[0]; |
| 94 if (moves == nullptr) return; |
122 | 95 |
123 void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) { | 96 DCHECK(instruction->parallel_moves()[1] == nullptr || |
| 97 instruction->parallel_moves()[1]->empty()); |
| 98 |
| 99 OperandSet outputs(local_zone()); |
| 100 OperandSet inputs(local_zone()); |
| 101 |
| 102 // Outputs and temps are treated together as potentially clobbering a |
| 103 // destination operand. |
| 104 for (size_t i = 0; i < instruction->OutputCount(); ++i) { |
| 105 outputs.insert(*instruction->OutputAt(i)); |
| 106 } |
| 107 for (size_t i = 0; i < instruction->TempCount(); ++i) { |
| 108 outputs.insert(*instruction->TempAt(i)); |
| 109 } |
| 110 |
| 111 // Input operands block elisions. |
| 112 for (size_t i = 0; i < instruction->InputCount(); ++i) { |
| 113 inputs.insert(*instruction->InputAt(i)); |
| 114 } |
| 115 |
| 116 // Elide moves made redundant by the instruction. |
| 117 for (MoveOperands* move : *moves) { |
| 118 if (outputs.find(move->destination()) != outputs.end() && |
| 119 inputs.find(move->destination()) == inputs.end()) { |
| 120 move->Eliminate(); |
| 121 } |
| 122 } |
| 123 |
| 124 // The ret instruction makes any assignment before it unnecessary, except for |
| 125 // the one for its input. |
| 126 if (instruction->opcode() == ArchOpcode::kArchRet) { |
| 127 for (MoveOperands* move : *moves) { |
| 128 if (inputs.find(move->destination()) == inputs.end()) { |
| 129 move->Eliminate(); |
| 130 } |
| 131 } |
| 132 } |
| 133 } |
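A minimal standalone sketch of the elision rule implemented above (not part of the patch; Move, RemoveClobbered and the string operands are illustrative stand-ins for V8's MoveOperands/InstructionOperand): a gap move is dropped when its destination is redefined by the instruction's outputs or temps and is not also read as an input.

  #include <set>
  #include <string>
  #include <vector>

  struct Move {
    std::string src, dst;
    bool eliminated = false;
  };

  // Drop gap moves whose destination the instruction overwrites anyway,
  // unless the instruction also reads that operand as an input.
  void RemoveClobbered(std::vector<Move>& gap,
                       const std::set<std::string>& outputs_and_temps,
                       const std::set<std::string>& inputs) {
    for (Move& m : gap) {
      if (outputs_and_temps.count(m.dst) > 0 && inputs.count(m.dst) == 0) {
        // E.g. "rax = [sp+8]" is pointless right before an instruction that
        // redefines rax without reading it.
        m.eliminated = true;
      }
    }
  }

The kArchRet special case follows the same shape, except that only the input set decides which assignments survive.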
| 134 |
| 135 void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) { |
| 136 if (from->IsCall()) return; |
| 137 |
| 138 ParallelMove* from_moves = from->parallel_moves()[0]; |
| 139 if (from_moves == nullptr || from_moves->empty()) return; |
| 140 |
| 141 ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone()); |
| 142 ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone()); |
| 143 |
| 144 // If an operand is an input to the instruction, we cannot move assignments |
| 145 // where it appears on the LHS. |
| 146 for (size_t i = 0; i < from->InputCount(); ++i) { |
| 147 dst_cant_be.insert(*from->InputAt(i)); |
| 148 } |
| 149 // If an operand is an output of the instruction, we cannot move assignments |
| 150 // where it appears on the RHS, because we would lose its value before the |
| 151 // instruction. |
| 152 // Same for temp operands. |
| 153 // The output can't appear on the LHS because we performed |
| 154 // RemoveClobberedDestinations for the "from" instruction. |
| 155 for (size_t i = 0; i < from->OutputCount(); ++i) { |
| 156 src_cant_be.insert(*from->OutputAt(i)); |
| 157 } |
| 158 for (size_t i = 0; i < from->TempCount(); ++i) { |
| 159 src_cant_be.insert(*from->TempAt(i)); |
| 160 } |
| 161 for (MoveOperands* move : *from_moves) { |
| 162 if (move->IsRedundant()) continue; |
| 163 // Assume dest has a value "V". If we have a "dest = y" move, then we can't |
| 164 // move "z = dest", because z would become y rather than "V". |
| 165 // We assume CompressMoves has happened before this, which means we don't |
| 166 // have more than one assignment to dest. |
| 167 src_cant_be.insert(move->destination()); |
| 168 } |
| 169 |
| 170 ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone()); |
| 171 // Start with all the moves that don't have a conflicting source or |
| 172 // destination operand; these are eligible for being moved down. |
| 173 for (MoveOperands* move : *from_moves) { |
| 174 if (move->IsRedundant()) continue; |
| 175 if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) { |
| 176 MoveKey key = {move->source(), move->destination()}; |
| 177 move_candidates.insert(key); |
| 178 } |
| 179 } |
| 180 if (move_candidates.empty()) return; |
| 181 |
| 182 // Stabilize the candidate set. |
| 183 bool changed = false; |
| 184 do { |
| 185 changed = false; |
| 186 for (auto iter = move_candidates.begin(); iter != move_candidates.end();) { |
| 187 auto current = iter; |
| 188 ++iter; |
| 189 InstructionOperand src = current->source; |
| 190 if (src_cant_be.find(src) != src_cant_be.end()) { |
| 191 src_cant_be.insert(current->destination); |
| 192 move_candidates.erase(current); |
| 193 changed = true; |
| 194 } |
| 195 } |
| 196 } while (changed); |
| 197 |
| 198 ParallelMove to_move(local_zone()); |
| 199 for (MoveOperands* move : *from_moves) { |
| 200 if (move->IsRedundant()) continue; |
| 201 MoveKey key = {move->source(), move->destination()}; |
| 202 if (move_candidates.find(key) != move_candidates.end()) { |
| 203 to_move.AddMove(move->source(), move->destination(), code_zone()); |
| 204 move->Eliminate(); |
| 205 } |
| 206 } |
| 207 if (to_move.empty()) return; |
| 208 |
| 209 ParallelMove* dest = |
| 210 to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone()); |
| 211 |
| 212 CompressMoves(&to_move, dest); |
| 213 DCHECK(dest->empty()); |
| 214 for (MoveOperands* m : to_move) { |
| 215 dest->push_back(m); |
| 216 } |
| 217 } |
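A minimal model of the candidate stabilization above (illustrative only; operands are strings and MigratableMoves is not a V8 API): a move in from's gap may be pushed down past from unless its destination is read by from, and unless its source is written by from or by a move that stays behind; the last condition is propagated to a fixed point.

  #include <set>
  #include <string>
  #include <vector>

  struct Move {
    std::string src, dst;
  };

  std::vector<Move> MigratableMoves(const std::vector<Move>& from_gap,
                                    const std::set<std::string>& from_inputs,
                                    const std::set<std::string>& from_defs) {
    // Operands a migrating move may not read: everything `from` defines
    // (outputs and temps) plus everything assigned in its own gap.
    std::set<std::string> src_cant_be = from_defs;
    for (const Move& m : from_gap) src_cant_be.insert(m.dst);

    // A move may not be delayed if `from` reads its destination.
    std::vector<Move> candidates;
    for (const Move& m : from_gap) {
      if (from_inputs.count(m.dst) == 0) candidates.push_back(m);
    }

    // Stabilize: a move reading a blocked operand is itself blocked, and in
    // turn blocks any move reading its destination.
    bool changed = true;
    while (changed) {
      changed = false;
      for (auto it = candidates.begin(); it != candidates.end();) {
        if (src_cant_be.count(it->src) > 0) {
          src_cant_be.insert(it->dst);
          it = candidates.erase(it);
          changed = true;
        } else {
          ++it;
        }
      }
    }
    return candidates;
  }

In the real pass the surviving candidates are appended to to's start gap and run through CompressMoves so they merge correctly with whatever was already there.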
| 218 |
| 219 void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) { |
124 if (right == nullptr) return; | 220 if (right == nullptr) return; |
125 | 221 |
126 MoveOpVector& eliminated = local_vector(); | 222 MoveOpVector& eliminated = local_vector(); |
127 DCHECK(eliminated.empty()); | 223 DCHECK(eliminated.empty()); |
128 | 224 |
129 if (!left->empty()) { | 225 if (!left->empty()) { |
130 // Modify the right moves in place and collect moves that will be killed by | 226 // Modify the right moves in place and collect moves that will be killed by |
131 // merging the two gaps. | 227 // merging the two gaps. |
132 for (MoveOperands* move : *right) { | 228 for (MoveOperands* move : *right) { |
133 if (move->IsRedundant()) continue; | 229 if (move->IsRedundant()) continue; |
134 MoveOperands* to_eliminate = left->PrepareInsertAfter(move); | 230 MoveOperands* to_eliminate = left->PrepareInsertAfter(move); |
135 if (to_eliminate != nullptr) eliminated.push_back(to_eliminate); | 231 if (to_eliminate != nullptr) eliminated.push_back(to_eliminate); |
136 } | 232 } |
137 // Eliminate dead moves. | 233 // Eliminate dead moves. |
138 for (MoveOperands* to_eliminate : eliminated) { | 234 for (MoveOperands* to_eliminate : eliminated) { |
139 to_eliminate->Eliminate(); | 235 to_eliminate->Eliminate(); |
140 } | 236 } |
141 eliminated.clear(); | 237 eliminated.clear(); |
142 } | 238 } |
143 // Add all possibly modified moves from right side. | 239 // Add all possibly modified moves from right side. |
144 for (MoveOperands* move : *right) { | 240 for (MoveOperands* move : *right) { |
145 if (move->IsRedundant()) continue; | 241 if (move->IsRedundant()) continue; |
146 left->push_back(move); | 242 left->push_back(move); |
147 } | 243 } |
148 // Nuke right. | 244 // Nuke right. |
149 right->clear(); | 245 right->clear(); |
150 DCHECK(eliminated.empty()); | 246 DCHECK(eliminated.empty()); |
151 } | 247 } |
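For example (operand names illustrative): merging left = {a = b} with right = {c = a; a = d} forwards the read of a through the earlier assignment and kills the stale write to a, yielding {c = b; a = d}, the same result the two gaps produced when executed one after the other.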
152 | 248 |
| 249 void MoveOptimizer::CompressGaps(Instruction* instruction) { |
| 250 int i = FindFirstNonEmptySlot(instruction); |
| 251 bool has_moves = i <= Instruction::LAST_GAP_POSITION; |
| 252 USE(has_moves); |
153 | 253 |
154 // Smash all consecutive moves into the left most move slot and accumulate them | 254 if (i == Instruction::LAST_GAP_POSITION) { |
155 // as much as possible across instructions. | 255 std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION], |
| 256 instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]); |
| 257 } else if (i == Instruction::FIRST_GAP_POSITION) { |
| 258 CompressMoves( |
| 259 instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION], |
| 260 instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]); |
| 261 } |
| 262 // We either have no moves, or, after swapping or compressing, we have |
| 263 // all the moves in the first gap position, and none in the second/end gap |
| 264 // position. |
| 265 ParallelMove* first = |
| 266 instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION]; |
| 267 ParallelMove* last = |
| 268 instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]; |
| 269 USE(first); |
| 270 USE(last); |
| 271 |
| 272 DCHECK(!has_moves || |
| 273 (first != nullptr && (last == nullptr || last->empty()))); |
| 274 } |
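Two concrete cases for the code above (illustrative operands): if only the end gap is non-empty, say start = {} and end = {x = y}, the slots are simply swapped; if the start gap is non-empty, say start = {a = b} and end = {c = a}, the end gap is folded into the start gap by CompressMoves, giving start = {a = b; c = b} and an empty end gap. Either way each instruction leaves this pass with at most one non-empty gap, the start one, which is what the DCHECK asserts.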
| 275 |
156 void MoveOptimizer::CompressBlock(InstructionBlock* block) { | 276 void MoveOptimizer::CompressBlock(InstructionBlock* block) { |
157 Instruction* prev_instr = nullptr; | 277 int first_instr_index = block->first_instruction_index(); |
158 for (int index = block->code_start(); index < block->code_end(); ++index) { | 278 int last_instr_index = block->last_instruction_index(); |
| 279 |
| 280 // Start by removing gap assignments where the output of the subsequent |
| 281 // instruction appears on the LHS, as long as they are not needed by its inputs. |
| 282 Instruction* prev_instr = code()->instructions()[first_instr_index]; |
| 283 RemoveClobberedDestinations(prev_instr); |
| 284 |
| 285 for (int index = first_instr_index + 1; index <= last_instr_index; ++index) { |
159 Instruction* instr = code()->instructions()[index]; | 286 Instruction* instr = code()->instructions()[index]; |
160 int i = FindFirstNonEmptySlot(instr); | 287 // Migrate eligible moves from prev_instr's gap down into instr's gap. |
161 bool has_moves = i <= Instruction::LAST_GAP_POSITION; | 288 MigrateMoves(instr, prev_instr); |
162 | 289 // Remove gap assignments clobbered by instr's output. |
163 if (i == Instruction::LAST_GAP_POSITION) { | 290 RemoveClobberedDestinations(instr); |
164 std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION], | 291 prev_instr = instr; |
165 instr->parallel_moves()[Instruction::LAST_GAP_POSITION]); | |
166 } else if (i == Instruction::FIRST_GAP_POSITION) { | |
167 CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION], | |
168 instr->parallel_moves()[Instruction::LAST_GAP_POSITION]); | |
169 } | |
170 // We either have no moves, or, after swapping or compressing, we have | |
171 // all the moves in the first gap position, and none in the second/end gap | |
172 // position. | |
173 ParallelMove* first = | |
174 instr->parallel_moves()[Instruction::FIRST_GAP_POSITION]; | |
175 ParallelMove* last = | |
176 instr->parallel_moves()[Instruction::LAST_GAP_POSITION]; | |
177 USE(last); | |
178 | |
179 DCHECK(!has_moves || | |
180 (first != nullptr && (last == nullptr || last->empty()))); | |
181 | |
182 if (prev_instr != nullptr) { | |
183 if (has_moves) { | |
184 // Smash first into prev_instr, killing left. | |
185 ParallelMove* pred_moves = prev_instr->parallel_moves()[0]; | |
186 CompressMoves(pred_moves, first); | |
187 } | |
188 // Slide prev_instr down so we always know where to look for it. | |
189 std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]); | |
190 } | |
191 | |
192 prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr; | |
193 if (GapsCanMoveOver(instr, local_zone())) continue; | |
194 if (prev_instr != nullptr) { | |
195 to_finalize_.push_back(prev_instr); | |
196 prev_instr = nullptr; | |
197 } | |
198 } | |
199 if (prev_instr != nullptr) { | |
200 to_finalize_.push_back(prev_instr); | |
201 } | 292 } |
202 } | 293 } |
203 | 294 |
204 | 295 |
205 const Instruction* MoveOptimizer::LastInstruction( | 296 const Instruction* MoveOptimizer::LastInstruction( |
206 const InstructionBlock* block) const { | 297 const InstructionBlock* block) const { |
207 return code()->instructions()[block->last_instruction_index()]; | 298 return code()->instructions()[block->last_instruction_index()]; |
208 } | 299 } |
209 | 300 |
210 | 301 |
(...skipping 40 matching lines...) |
251 res.first->second++; | 342 res.first->second++; |
252 if (res.first->second == block->PredecessorCount()) { | 343 if (res.first->second == block->PredecessorCount()) { |
253 correct_counts++; | 344 correct_counts++; |
254 } | 345 } |
255 } | 346 } |
256 } | 347 } |
257 } | 348 } |
258 if (move_map.empty() || correct_counts == 0) return; | 349 if (move_map.empty() || correct_counts == 0) return; |
259 | 350 |
260 // Find insertion point. | 351 // Find insertion point. |
261 Instruction* instr = nullptr; | 352 Instruction* instr = code()->instructions()[block->first_instruction_index()]; |
262 for (int i = block->first_instruction_index(); | |
263 i <= block->last_instruction_index(); ++i) { | |
264 instr = code()->instructions()[i]; | |
265 if (correct_counts != move_map.size() || | |
266 !GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant()) | |
267 break; | |
268 } | |
269 DCHECK_NOT_NULL(instr); | |
270 | 353 |
271 if (correct_counts != move_map.size()) { | 354 if (correct_counts != move_map.size()) { |
272 // Moves that are unique to each predecessor won't be pushed to the common | 355 // Moves that are unique to each predecessor won't be pushed to the common |
273 // successor. | 356 // successor. |
274 OperandSet conflicting_srcs(local_zone()); | 357 OperandSet conflicting_srcs(local_zone()); |
275 for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) { | 358 for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) { |
276 auto current = iter; | 359 auto current = iter; |
277 ++iter; | 360 ++iter; |
278 if (current->second != block->PredecessorCount()) { | 361 if (current->second != block->PredecessorCount()) { |
279 InstructionOperand dest = current->first.destination; | 362 InstructionOperand dest = current->first.destination; |
(...skipping 22 matching lines...) |
302 changed = true; | 385 changed = true; |
303 } | 386 } |
304 } | 387 } |
305 } while (changed); | 388 } while (changed); |
306 } | 389 } |
307 | 390 |
308 if (move_map.empty()) return; | 391 if (move_map.empty()) return; |
309 | 392 |
310 DCHECK_NOT_NULL(instr); | 393 DCHECK_NOT_NULL(instr); |
311 bool gap_initialized = true; | 394 bool gap_initialized = true; |
312 if (instr->parallel_moves()[0] == nullptr || | 395 if (instr->parallel_moves()[0] != nullptr && |
313 instr->parallel_moves()[0]->empty()) { | 396 !instr->parallel_moves()[0]->empty()) { |
314 to_finalize_.push_back(instr); | |
315 } else { | |
316 // Will compress after insertion. | 397 // Will compress after insertion. |
317 gap_initialized = false; | 398 gap_initialized = false; |
318 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]); | 399 std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]); |
319 } | 400 } |
320 ParallelMove* moves = instr->GetOrCreateParallelMove( | 401 ParallelMove* moves = instr->GetOrCreateParallelMove( |
321 static_cast<Instruction::GapPosition>(0), code_zone()); | 402 static_cast<Instruction::GapPosition>(0), code_zone()); |
322 // Delete relevant entries in predecessors and move everything to block. | 403 // Delete relevant entries in predecessors and move everything to block. |
323 bool first_iteration = true; | 404 bool first_iteration = true; |
324 for (RpoNumber& pred_index : block->predecessors()) { | 405 for (RpoNumber& pred_index : block->predecessors()) { |
325 const InstructionBlock* pred = code()->InstructionBlockAt(pred_index); | 406 const InstructionBlock* pred = code()->InstructionBlockAt(pred_index); |
326 for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) { | 407 for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) { |
327 if (move->IsRedundant()) continue; | 408 if (move->IsRedundant()) continue; |
328 MoveKey key = {move->source(), move->destination()}; | 409 MoveKey key = {move->source(), move->destination()}; |
329 auto it = move_map.find(key); | 410 auto it = move_map.find(key); |
330 if (it != move_map.end()) { | 411 if (it != move_map.end()) { |
331 if (first_iteration) { | 412 if (first_iteration) { |
332 moves->AddMove(move->source(), move->destination()); | 413 moves->AddMove(move->source(), move->destination()); |
333 } | 414 } |
334 move->Eliminate(); | 415 move->Eliminate(); |
335 } | 416 } |
336 } | 417 } |
337 first_iteration = false; | 418 first_iteration = false; |
338 } | 419 } |
339 // Compress. | 420 // Compress. |
340 if (!gap_initialized) { | 421 if (!gap_initialized) { |
341 CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]); | 422 CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]); |
342 } | 423 } |
| 424 CompressBlock(block); |
343 } | 425 } |
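A concrete (illustrative) shape for the merge optimization: if a block has two predecessors and the last gap of each contains rax = [sp+16], that move is deleted from both predecessors and a single rax = [sp+16] is inserted into the successor's first gap; the final CompressBlock call then lets the pulled-down move go through the usual clobber removal and migration within the successor.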
344 | 426 |
345 | 427 |
346 namespace { | 428 namespace { |
347 | 429 |
348 bool IsSlot(const InstructionOperand& op) { | 430 bool IsSlot(const InstructionOperand& op) { |
349 return op.IsStackSlot() || op.IsDoubleStackSlot(); | 431 return op.IsStackSlot() || op.IsDoubleStackSlot(); |
350 } | 432 } |
351 | 433 |
352 | 434 |
353 bool LoadCompare(const MoveOperands* a, const MoveOperands* b) { | 435 bool LoadCompare(const MoveOperands* a, const MoveOperands* b) { |
354 if (!a->source().EqualsCanonicalized(b->source())) { | 436 if (!a->source().EqualsCanonicalized(b->source())) { |
355 return a->source().CompareCanonicalized(b->source()); | 437 return a->source().CompareCanonicalized(b->source()); |
356 } | 438 } |
357 if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false; | 439 if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false; |
358 if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true; | 440 if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true; |
359 return a->destination().CompareCanonicalized(b->destination()); | 441 return a->destination().CompareCanonicalized(b->destination()); |
360 } | 442 } |
361 | 443 |
362 } // namespace | 444 } // namespace |
363 | 445 |
364 | 446 |
365 // Split multiple loads of the same constant or stack slot off into the second | 447 // Split multiple loads of the same constant or stack slot off into the second |
366 // slot and keep remaining moves in the first slot. | 448 // slot and keep remaining moves in the first slot. |
367 void MoveOptimizer::FinalizeMoves(Instruction* instr) { | 449 void MoveOptimizer::FinalizeMoves(Instruction* instr) { |
368 MoveOpVector& loads = local_vector(); | 450 MoveOpVector& loads = local_vector(); |
369 DCHECK(loads.empty()); | 451 DCHECK(loads.empty()); |
370 | 452 |
| 453 ParallelMove* parallel_moves = instr->parallel_moves()[0]; |
| 454 if (parallel_moves == nullptr) return; |
371 // Find all the loads. | 455 // Find all the loads. |
372 for (MoveOperands* move : *instr->parallel_moves()[0]) { | 456 for (MoveOperands* move : *parallel_moves) { |
373 if (move->IsRedundant()) continue; | 457 if (move->IsRedundant()) continue; |
374 if (move->source().IsConstant() || IsSlot(move->source())) { | 458 if (move->source().IsConstant() || IsSlot(move->source())) { |
375 loads.push_back(move); | 459 loads.push_back(move); |
376 } | 460 } |
377 } | 461 } |
378 if (loads.empty()) return; | 462 if (loads.empty()) return; |
379 // Group the loads by source, moving the preferred destination to the | 463 // Group the loads by source, moving the preferred destination to the |
380 // beginning of the group. | 464 // beginning of the group. |
381 std::sort(loads.begin(), loads.end(), LoadCompare); | 465 std::sort(loads.begin(), loads.end(), LoadCompare); |
382 MoveOperands* group_begin = nullptr; | 466 MoveOperands* group_begin = nullptr; |
(...skipping 11 matching lines...) |
394 static_cast<Instruction::GapPosition>(1), code_zone()); | 478 static_cast<Instruction::GapPosition>(1), code_zone()); |
395 slot_1->AddMove(group_begin->destination(), load->destination()); | 479 slot_1->AddMove(group_begin->destination(), load->destination()); |
396 load->Eliminate(); | 480 load->Eliminate(); |
397 } | 481 } |
398 loads.clear(); | 482 loads.clear(); |
399 } | 483 } |
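For instance (illustrative operands), if the first gap holds r1 = const#5; r2 = const#5; [sp+8] = const#5, sorting groups the three loads of const#5 with the register destination first; r1 = const#5 stays in the first gap while the other two are rewritten into the second gap as r2 = r1 and [sp+8] = r1, so the constant is materialized only once.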
400 | 484 |
401 } // namespace compiler | 485 } // namespace compiler |
402 } // namespace internal | 486 } // namespace internal |
403 } // namespace v8 | 487 } // namespace v8 |