Chromium Code Reviews

Unified Diff: src/compiler/move-optimizer.cc

Issue 1634093002: [turbofan] fine grained in-block move optimization (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: temp (created 4 years, 10 months ago)
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/move-optimizer.h"

namespace v8 {
namespace internal {
namespace compiler {

(...skipping 17 matching lines...)
  bool operator()(const InstructionOperand& a,
                  const InstructionOperand& b) const {
    return a.CompareCanonicalized(b);
  }
};

typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;


-bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
-  if (instr->IsNop()) return true;
-  if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
-      instr->ClobbersDoubleRegisters()) {
-    return false;
-  }
-  if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
-
-  ZoneSet<InstructionOperand, OperandCompare> operands(zone);
-  for (size_t i = 0; i < instr->InputCount(); ++i) {
-    operands.insert(*instr->InputAt(i));
-  }
-  for (size_t i = 0; i < instr->OutputCount(); ++i) {
-    operands.insert(*instr->OutputAt(i));
-  }
-  for (size_t i = 0; i < instr->TempCount(); ++i) {
-    operands.insert(*instr->TempAt(i));
-  }
-  for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
-       i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
-    ParallelMove* moves = instr->parallel_moves()[i];
-    if (moves == nullptr) continue;
-    for (MoveOperands* move : *moves) {
-      if (operands.count(move->source()) > 0 ||
-          operands.count(move->destination()) > 0) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-
int FindFirstNonEmptySlot(const Instruction* instr) {
  int i = Instruction::FIRST_GAP_POSITION;
  for (; i <= Instruction::LAST_GAP_POSITION; i++) {
    ParallelMove* moves = instr->parallel_moves()[i];
    if (moves == nullptr) continue;
    for (MoveOperands* move : *moves) {
      if (!move->IsRedundant()) return i;
      move->Eliminate();
    }
    moves->clear();  // Clear this redundant move.
  }
  return i;
}

}  // namespace


MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
    : local_zone_(local_zone),
      code_(code),
-      to_finalize_(local_zone),
      local_vector_(local_zone) {}


void MoveOptimizer::Run() {
+  for (Instruction* instruction : code()->instructions()) {
+    CompressGaps(instruction);
+  }
  for (InstructionBlock* block : code()->instruction_blocks()) {
    CompressBlock(block);
  }
  for (InstructionBlock* block : code()->instruction_blocks()) {
    if (block->PredecessorCount() <= 1) continue;
    if (!block->IsDeferred()) {
      bool has_only_deferred = true;
      for (RpoNumber& pred_id : block->predecessors()) {
        if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
          has_only_deferred = false;
          break;
        }
      }
      // This would pull down common moves. If the moves occur in deferred
      // blocks, and the closest common successor is not deferred, we lose the
      // optimization of just spilling/filling in deferred blocks, when the
      // current block is not deferred.
      if (has_only_deferred) continue;
    }
    OptimizeMerge(block);
  }
-  for (Instruction* gap : to_finalize_) {
+  for (Instruction* gap : code()->instructions()) {
    FinalizeMoves(gap);
  }
}

+void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
+  if (instruction->IsCall()) return;
+  ParallelMove* moves = instruction->parallel_moves()[0];
+  if (moves == nullptr) return;
+
+  DCHECK(instruction->parallel_moves()[1] == nullptr ||
+         instruction->parallel_moves()[1]->empty());
+
+  OperandSet outputs(local_zone());
+  OperandSet inputs(local_zone());
+
+  // Outputs and temps are treated together as potentially clobbering a
+  // destination operand.
+  for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+    outputs.insert(*instruction->OutputAt(i));
+  }
+  for (size_t i = 0; i < instruction->TempCount(); ++i) {
+    outputs.insert(*instruction->TempAt(i));
+  }
+
+  // Input operands block elisions.
+  for (size_t i = 0; i < instruction->InputCount(); ++i) {
+    inputs.insert(*instruction->InputAt(i));
+  }
+
+  // Elide moves made redundant by the instruction.
+  for (MoveOperands* move : *moves) {
+    if (outputs.find(move->destination()) != outputs.end() &&
+        inputs.find(move->destination()) == inputs.end()) {
+      move->Eliminate();
+    }
+  }
+
+  // ret makes any assignment before it unnecessary, except for the one

Jarin 2016/02/03 05:25:17 Nit: Start comments with capital letters, please.
Mircea Trofin 2016/02/04 05:23:31 Done.

+  // for its input.
+  if (instruction->opcode() == ArchOpcode::kArchRet) {
+    for (MoveOperands* move : *moves) {
+      if (inputs.find(move->destination()) == inputs.end()) {
+        move->Eliminate();
+      }
+    }
+  }
+}
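
The elision rule above is easiest to see on a toy example. Below is a standalone sketch (plain ints standing in for InstructionOperand, nothing from V8's actual classes): a gap move is dead exactly when its destination is overwritten by the instruction's outputs/temps and is not read as an input.

#include <iostream>
#include <set>
#include <utility>
#include <vector>

int main() {
  using Operand = int;
  using Move = std::pair<Operand, Operand>;  // {source, destination}

  std::vector<Move> gap = {{7, 1}, {7, 2}};  // moves in the gap before instr
  std::set<Operand> outputs = {1};           // instr overwrites operand 1
  std::set<Operand> inputs = {2};            // instr reads operand 2

  for (const Move& m : gap) {
    bool dead = outputs.count(m.second) > 0 && inputs.count(m.second) == 0;
    std::cout << m.second << " <- " << m.first
              << (dead ? "  eliminated: clobbered and unread\n" : "  kept\n");
  }
  // Prints: "1 <- 7 eliminated" and "2 <- 7 kept" (the instruction reads 2).
}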
+
+void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
+  if (from->IsCall()) return;
+
+  ParallelMove* from_moves = from->parallel_moves()[0];
+  if (from_moves == nullptr || from_moves->empty()) return;
+
+  ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
+  ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+
+  // if an operand is an input to the instruction, we cannot move assignments
+  // where it appears on the LHS

Jarin 2016/02/03 05:25:17 Nit: Dot at the end of sentences. (Here and below.)
Mircea Trofin 2016/02/04 05:23:31 Done.

+  for (size_t i = 0; i < from->InputCount(); ++i) {
+    dst_cant_be.insert(*from->InputAt(i));
+  }
+  // if an operand is output to the instruction, we cannot move assignments
+  // where it appears on the RHS, because we would lose its value before the
+  // instruction.
+  // Same for temp operands.

Jarin 2016/02/03 05:25:17 I do not understand why it is still ok to move assignments...
Mircea Trofin 2016/02/04 05:23:32 It's because we first perform RemoveClobberedDestinations...

+  for (size_t i = 0; i < from->OutputCount(); ++i) {
+    src_cant_be.insert(*from->OutputAt(i));
+  }
+  for (size_t i = 0; i < from->TempCount(); ++i) {
+    src_cant_be.insert(*from->TempAt(i));
+  }
+  for (MoveOperands* move : *from->parallel_moves()[0]) {

Jarin 2016/02/03 05:25:17 from->parallel_moves[0] ==> from_moves
Mircea Trofin 2016/02/04 05:23:32 Done.

+    if (move->IsRedundant()) continue;
+    // assume dest has a value "V". If we have a "dest = y" move, then we can't
+    // move "z = dest", because z would become y rather than "V"
+    src_cant_be.insert(move->destination());

Jarin 2016/02/03 05:25:18 Again, would not it be also bad to swap "dest = y"...
Mircea Trofin 2016/02/04 05:23:31 By this stage, we did CompressMoves, which takes care of...

+  }
+
+  ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
+  // We start with all the moves that don't have conflicting source or
+  // destination operands are eligible for being moved down.
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+      MoveKey key = {move->source(), move->destination()};
+      move_candidates.insert(key);
+    }
+  }
+  if (move_candidates.empty()) return;
+
+  // Stabilize the candidate set.
+  bool changed = false;
+  do {
+    changed = false;
+    for (auto iter = move_candidates.begin(); iter != move_candidates.end();) {
+      auto current = iter;
+      ++iter;
+      InstructionOperand src = current->source;
+      if (src_cant_be.find(src) != src_cant_be.end()) {
+        src_cant_be.insert(current->destination);
+        move_candidates.erase(current);
+        changed = true;
+      }
+    }
+  } while (changed);
+
+  ParallelMove to_move(local_zone());
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    MoveKey key = {move->source(), move->destination()};
+    if (move_candidates.find(key) != move_candidates.end()) {
+      to_move.AddMove(move->source(), move->destination(), code_zone());
+      move->Eliminate();
+    }
+  }
+  if (to_move.empty()) return;
+
+  ParallelMove* dest =
+      to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone());
+
+  CompressMoves(&to_move, dest);
+  DCHECK(dest->empty());
+  for (MoveOperands* m : to_move) {
+    dest->push_back(m);
+  }
+}
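
The stabilization loop above computes a fixed point because dropping one candidate can invalidate another candidate that reads its destination. A minimal sketch of that transitive pruning, using toy int operands (a hypothetical simplification, not the real MoveKey/ZoneSet types):

#include <iostream>
#include <set>
#include <utility>

int main() {
  using Move = std::pair<int, int>;              // {source, destination}
  std::set<Move> candidates = {{1, 2}, {2, 3}};  // "2 <- 1" and "3 <- 2"
  std::set<int> src_cant_be = {1};  // e.g. operand 1 is written by `from`

  bool changed;
  do {
    changed = false;
    for (auto it = candidates.begin(); it != candidates.end();) {
      if (src_cant_be.count(it->first) > 0) {
        // Dropping this move pins its destination here, so any candidate
        // reading that destination dies too: "3 <- 2" goes transitively.
        src_cant_be.insert(it->second);
        it = candidates.erase(it);
        changed = true;
      } else {
        ++it;
      }
    }
  } while (changed);

  std::cout << candidates.size() << " candidates remain\n";  // prints 0
}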

-void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
  if (right == nullptr) return;

  MoveOpVector& eliminated = local_vector();
  DCHECK(eliminated.empty());

  if (!left->empty()) {
    // Modify the right moves in place and collect moves that will be killed by
    // merging the two gaps.
    for (MoveOperands* move : *right) {
      if (move->IsRedundant()) continue;
      MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
      if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
    }
    // Eliminate dead moves.
    for (MoveOperands* to_eliminate : eliminated) {
      to_eliminate->Eliminate();
    }
    eliminated.clear();
  }
  // Add all possibly modified moves from right side.
  for (MoveOperands* move : *right) {
    if (move->IsRedundant()) continue;
    left->push_back(move);
  }
  // Nuke right.
  right->clear();
  DCHECK(eliminated.empty());
}
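
On Jarin's "dest = y" question in the MigrateMoves thread: CompressMoves, through PrepareInsertAfter, guarantees that a merged gap holds at most one assignment per destination, so MigrateMoves never sees two writes to the same operand. A self-contained toy model of that merge, assuming simplified int operands and plain vector storage rather than V8's ParallelMove:

#include <algorithm>
#include <iostream>
#include <vector>

struct Move { int src; int dst; };

// Insert `m`, which logically executes after everything in `left`, while
// keeping `left` a valid parallel move: forward m's source through any
// earlier write, and kill any earlier write that m overwrites.
void InsertAfter(std::vector<Move>& left, Move m) {
  for (const Move& prev : left) {
    if (prev.dst == m.src) m.src = prev.src;  // read-after-write forwarding
  }
  left.erase(std::remove_if(left.begin(), left.end(),
                            [&](const Move& prev) { return prev.dst == m.dst; }),
             left.end());
  left.push_back(m);
}

int main() {
  std::vector<Move> left = {{1, 2}};  // 2 <- 1
  InsertAfter(left, {2, 3});          // 3 <- (new value of 2), i.e. 3 <- 1
  InsertAfter(left, {4, 2});          // overwrites 2, so "2 <- 1" dies
  for (const Move& m : left) std::cout << m.dst << " <- " << m.src << "\n";
  // Prints "3 <- 1" then "2 <- 4": one assignment per destination.
}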

+void MoveOptimizer::CompressGaps(Instruction* instruction) {
+  int i = FindFirstNonEmptySlot(instruction);
+  bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+  USE(has_moves);
+
+  if (i == Instruction::LAST_GAP_POSITION) {
+    std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+              instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+  } else if (i == Instruction::FIRST_GAP_POSITION) {
+    CompressMoves(
+        instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+        instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+  }
+  // We either have no moves, or, after swapping or compressing, we have
+  // all the moves in the first gap position, and none in the second/end gap
+  // position.
+  ParallelMove* first =
+      instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+  ParallelMove* last =
+      instruction->parallel_moves()[Instruction::LAST_GAP_POSITION];
+  USE(first);
+  USE(last);
+
+  DCHECK(!has_moves ||
+         (first != nullptr && (last == nullptr || last->empty())));
+}
+
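A crude standalone illustration of the invariant CompressGaps establishes, with both gap slots modeled as plain vectors (the real merge goes through CompressMoves with parallel-move semantics; here it is just a concatenation):

#include <iostream>
#include <utility>
#include <vector>

int main() {
  using Moves = std::vector<std::pair<int, int>>;
  Moves first;            // FIRST_GAP_POSITION, empty here
  Moves last = {{1, 2}};  // LAST_GAP_POSITION holds one move

  if (first.empty() && !last.empty()) {
    std::swap(first, last);  // cheap: nothing to merge
  } else if (!first.empty()) {
    first.insert(first.end(), last.begin(), last.end());  // crude "compress"
    last.clear();
  }
  std::cout << "first=" << first.size() << " last=" << last.size() << "\n";
  // Prints "first=1 last=0": the invariant the DCHECK above asserts.
}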
-// Smash all consecutive moves into the left most move slot and accumulate them
-// as much as possible across instructions.
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
-  Instruction* prev_instr = nullptr;
-  for (int index = block->code_start(); index < block->code_end(); ++index) {
+  int first_instr_index = block->first_instruction_index();
+  int last_instr_index = block->last_instruction_index();
+
+  Instruction* prev_instr = code()->instructions()[first_instr_index];
+  RemoveClobberedDestinations(prev_instr);
+
+  for (int index = first_instr_index + 1; index <= last_instr_index; ++index) {
    Instruction* instr = code()->instructions()[index];
-    int i = FindFirstNonEmptySlot(instr);
-    bool has_moves = i <= Instruction::LAST_GAP_POSITION;
-
-    if (i == Instruction::LAST_GAP_POSITION) {
-      std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    } else if (i == Instruction::FIRST_GAP_POSITION) {
-      CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                    instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    }
-    // We either have no moves, or, after swapping or compressing, we have
-    // all the moves in the first gap position, and none in the second/end gap
-    // position.
-    ParallelMove* first =
-        instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
-    ParallelMove* last =
-        instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
-    USE(last);
-
-    DCHECK(!has_moves ||
-           (first != nullptr && (last == nullptr || last->empty())));
-
-    if (prev_instr != nullptr) {
-      if (has_moves) {
-        // Smash first into prev_instr, killing left.
-        ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
-        CompressMoves(pred_moves, first);
-      }
-      // Slide prev_instr down so we always know where to look for it.
-      std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
-    }
-
-    prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
-    if (GapsCanMoveOver(instr, local_zone())) continue;
-    if (prev_instr != nullptr) {
-      to_finalize_.push_back(prev_instr);
-      prev_instr = nullptr;
-    }
-  }
-  if (prev_instr != nullptr) {
-    to_finalize_.push_back(prev_instr);
+    MigrateMoves(instr, prev_instr);
+    RemoveClobberedDestinations(instr);
+    prev_instr = instr;
  }
}


const Instruction* MoveOptimizer::LastInstruction(
    const InstructionBlock* block) const {
  return code()->instructions()[block->last_instruction_index()];
}

(...skipping 40 matching lines...)
        res.first->second++;
        if (res.first->second == block->PredecessorCount()) {
          correct_counts++;
        }
      }
    }
  }
  if (move_map.empty() || correct_counts == 0) return;

  // Find insertion point.
-  Instruction* instr = nullptr;
-  for (int i = block->first_instruction_index();
-       i <= block->last_instruction_index(); ++i) {
-    instr = code()->instructions()[i];
-    if (correct_counts != move_map.size() ||
-        !GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
-      break;
-  }
-  DCHECK_NOT_NULL(instr);
+  Instruction* instr = code()->instructions()[block->first_instruction_index()];

  if (correct_counts != move_map.size()) {
    // Moves that are unique to each predecessor won't be pushed to the common
    // successor.
    OperandSet conflicting_srcs(local_zone());
    for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
      auto current = iter;
      ++iter;
      if (current->second != block->PredecessorCount()) {
        InstructionOperand dest = current->first.destination;
(...skipping 22 matching lines...)
          changed = true;
        }
      }
    } while (changed);
  }

  if (move_map.empty()) return;

  DCHECK_NOT_NULL(instr);
  bool gap_initialized = true;
-  if (instr->parallel_moves()[0] == nullptr ||
-      instr->parallel_moves()[0]->empty()) {
-    to_finalize_.push_back(instr);
-  } else {
+  if (instr->parallel_moves()[0] != nullptr &&
+      !instr->parallel_moves()[0]->empty()) {
    // Will compress after insertion.
    gap_initialized = false;
    std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
  ParallelMove* moves = instr->GetOrCreateParallelMove(
      static_cast<Instruction::GapPosition>(0), code_zone());
  // Delete relevant entries in predecessors and move everything to block.
  bool first_iteration = true;
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
      if (move->IsRedundant()) continue;
      MoveKey key = {move->source(), move->destination()};
      auto it = move_map.find(key);
      if (it != move_map.end()) {
        if (first_iteration) {
          moves->AddMove(move->source(), move->destination());
        }
        move->Eliminate();
      }
    }
    first_iteration = false;
  }
  // Compress.
  if (!gap_initialized) {
    CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
+  CompressBlock(block);
}
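
The move_map bookkeeping in OptimizeMerge boils down to counting: a move may be hoisted into the merge block only if it occurs in the last gap of every predecessor. A small sketch with toy operands (not the real MoveKey/ZoneMap machinery):

#include <cstddef>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

int main() {
  using Move = std::pair<int, int>;  // {source, destination}
  std::vector<std::vector<Move>> preds = {
      {{1, 2}, {3, 4}},  // predecessor A's last-gap moves
      {{1, 2}},          // predecessor B's last-gap moves
  };

  std::map<Move, std::size_t> counts;
  for (const auto& moves : preds)
    for (const Move& m : moves) ++counts[m];

  for (const auto& [m, n] : counts) {
    if (n == preds.size())
      std::cout << m.second << " <- " << m.first << " is common, hoist it\n";
  }
  // Only "2 <- 1" is hoisted; "4 <- 3" stays in predecessor A.
}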


namespace {

bool IsSlot(const InstructionOperand& op) {
  return op.IsStackSlot() || op.IsDoubleStackSlot();
}


bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
  if (!a->source().EqualsCanonicalized(b->source())) {
    return a->source().CompareCanonicalized(b->source());
  }
  if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
  if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
  return a->destination().CompareCanonicalized(b->destination());
}

}  // namespace

// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
  MoveOpVector& loads = local_vector();
  DCHECK(loads.empty());

+  ParallelMove* parallel_moves = instr->parallel_moves()[0];
+  if (parallel_moves == nullptr) return;
  // Find all the loads.
-  for (MoveOperands* move : *instr->parallel_moves()[0]) {
+  for (MoveOperands* move : *parallel_moves) {
    if (move->IsRedundant()) continue;
    if (move->source().IsConstant() || IsSlot(move->source())) {
      loads.push_back(move);
    }
  }
  if (loads.empty()) return;
  // Group the loads by source, moving the preferred destination to the
  // beginning of the group.
  std::sort(loads.begin(), loads.end(), LoadCompare);
  MoveOperands* group_begin = nullptr;
(...skipping 11 matching lines...)
        static_cast<Instruction::GapPosition>(1), code_zone());
    slot_1->AddMove(group_begin->destination(), load->destination());
    load->Eliminate();
  }
  loads.clear();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
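
As a footnote to FinalizeMoves: the load-splitting idea, sketched standalone with string operands (hypothetical stand-ins; the real code sorts MoveOperands with LoadCompare, which also puts the preferred destination first, and emits the rewritten copies into the second gap position):

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  using Move = std::pair<std::string, std::string>;  // {source, destination}
  std::vector<Move> loads = {{"const#5", "r1"}, {"const#5", "slot0"},
                             {"slot3", "r2"}};

  std::sort(loads.begin(), loads.end());  // group duplicate loads by source
  std::vector<Move> slot1;                // models the second gap position
  for (size_t i = 1; i < loads.size(); ++i) {
    if (loads[i].first == loads[i - 1].first) {
      // Same source as the group leader: load once, then copy the result.
      slot1.push_back({loads[i - 1].second, loads[i].second});
      loads[i] = loads[i - 1];  // stand-in for move->Eliminate()
    }
  }
  for (const Move& m : slot1)
    std::cout << m.second << " <- " << m.first << "\n";  // slot0 <- r1
}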