// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

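// The *_uses_ arrays count, for each allocatable register, how many moves
// in moves_ still read it (source uses) or will overwrite it (destination
// uses). They let GetFreeRegisterNot pick a scratch register without
// rescanning the whole move list.
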
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32), spilled_register_(-1) {
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    general_source_uses_[i] = 0;
    general_destination_uses_[i] = 0;
  }
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
    double_source_uses_[i] = 0;
    double_destination_uses_[i] = 0;
  }
}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = moves_.length() - 1; i >= 0; --i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = moves_.length() - 1; i >= 0; --i) {
    if (!moves_[i].IsEliminated()) {
fschneider
2011/01/17 09:44:16
Not sure: can a move with a constant source operand be eliminated at this point?
Kevin Millikin (Chromium)
2011/01/17 10:13:08
Only if it's the move passed to PerformMove.
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}
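
// Illustrative trace (hypothetical operands): given the parallel move
//   { eax -> [esp+4], [esp+4] -> eax, imm(1) -> ebx },
// the first loop in Resolve performs the eax/[esp+4] cycle: PerformMove
// marks one move of the pair pending, recurses into the other, finds it
// blocked only by the pending move, and breaks the cycle with EmitSwap.
// Because the constant move into ebx is deferred, ebx still counts as a
// free register (destination uses only), so the swap can use it as a
// mov-based temporary instead of the XOR fallback. The second loop then
// emits imm(1) -> ebx.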


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  //
  // Inform the move assembler of each move that will eventually be
  // performed so it can track the registers used.
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
fschneider
2011/01/17 09:44:16
How about a slow assert that verifies the move-graph invariants?
Kevin Millikin (Chromium)
2011/01/17 10:13:08
Good idea. I will add it.
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = moves_.length() - 1; i >= 0; --i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so, remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = moves_.length() - 1; i >= 0; --i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
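
// Illustrative trace (hypothetical operands): for the cycle
//   { eax -> ebx, ebx -> ecx, ecx -> eax },
// PerformMove recurses along the chain until it reaches a move blocked
// only by the pending one and breaks the cycle with a swap. Each swap
// rewrites the sources of the remaining moves to track the values that
// traded places, so unwinding performs one more swap and leaves the final
// move reading the same operand it writes; it is then removed as
// redundant. An N-move cycle thus costs N - 1 swaps.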


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) {
    ++general_source_uses_[source->index()];
  } else if (source->IsDoubleRegister()) {
    ++double_source_uses_[source->index()];
  }

  LOperand* destination = move.destination();
  if (destination->IsRegister()) {
    ++general_destination_uses_[destination->index()];
  } else if (destination->IsDoubleRegister()) {
    ++double_destination_uses_[destination->index()];
  }

  moves_.Add(move);
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --general_source_uses_[source->index()];
    ASSERT(general_source_uses_[source->index()] >= 0);
  } else if (source->IsDoubleRegister()) {
    --double_source_uses_[source->index()];
    ASSERT(double_source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --general_destination_uses_[destination->index()];
    ASSERT(general_destination_uses_[destination->index()] >= 0);
  } else if (destination->IsDoubleRegister()) {
    --double_destination_uses_[destination->index()];
    ASSERT(double_destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


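// Recomputes a source-use count by scanning the remaining moves. Needed
// after EmitSwap has rewritten sources through a non-register operand,
// for which no incremental count is maintained.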
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = moves_.length() - 1; i >= 0; --i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (general_source_uses_[i] == 0 &&
        general_destination_uses_[i] > 0 &&
        i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}
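
// A register qualifies as free when no remaining move still reads it
// (source uses == 0) but some remaining move will overwrite it anyway
// (destination uses > 0): its current value is dead, so clobbering it
// early cannot change the result of the parallel move.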


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (general_source_uses_[i] != 0) return false;
    if (general_destination_uses_[i] != 0) return false;
  }
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
    if (double_source_uses_[i] != 0) return false;
    if (double_destination_uses_[i] != 0) return false;
  }
  return true;
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (general_source_uses_[i] == 0 && general_destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}
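
// Case 4 is safe even though the chosen register may still be needed by a
// remaining move: EmitMove and EmitSwap call EnsureRestored on both
// operands first, which pops the spilled value back before the register
// is read or written, and Finish pops it at the end otherwise.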


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Immediate src = cgen_->ToImmediate(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsDoubleRegister()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister src = cgen_->ToDoubleRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movdbl(dst, src);

  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movdbl(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movdbl(xmm0, src);
      __ movdbl(dst, xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point. Memory-to-memory swaps are a rare case.
fschneider
2011/01/17 09:44:16
The comment mentions mem-to-mem swaps which are not handled in this case.
Kevin Millikin (Chromium)
2011/01/17 10:13:08
I will fix it.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
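      // No free register: swap reg and mem in place with the XOR trick,
      // relying on x ^ y ^ y == x. Illustrative values: with reg = 5 and
      // mem = 3, the three XORs leave (reg, mem) at (6, 3), then (6, 5),
      // then (3, 5).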
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
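      // tmp0 stands in for dst: after mov(tmp0, dst), the three XORs leave
      // tmp0 holding the old src value and src holding the old dst value,
      // and the final mov(dst, tmp0) completes the exchange.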
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-register or register-memory. We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movdbl(xmm0, other);
    __ movdbl(other, reg);
    __ movdbl(reg, Operand(xmm0));

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    UNIMPLEMENTED();

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source changed
  // to reflect the state of affairs after the swap.
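  // For example, after swapping eax and ebx, a remaining move that was
  // going to read eax must now read ebx, and vice versa, because the two
  // values have traded places.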
  for (int j = moves_.length() - 1; j >= 0; --j) {
    LMoveOperands other_move = moves_[j];
    if (other_move.Blocks(source)) {
      moves_[j].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[j].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
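  // Only the source counts can change here: the loop above rewrites
  // sources only, so every move's destination operand is untouched.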
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = general_source_uses_[source->index()];
    general_source_uses_[source->index()] =
        general_source_uses_[destination->index()];
    general_source_uses_[destination->index()] = temp;

  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    general_source_uses_[source->index()] = CountSourceUses(source);

  } else if (destination->IsRegister()) {
    general_source_uses_[destination->index()] = CountSourceUses(destination);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    int temp = double_source_uses_[source->index()];
    double_source_uses_[source->index()] =
        double_source_uses_[destination->index()];
    double_source_uses_[destination->index()] = temp;

  } else if (source->IsDoubleRegister()) {
    double_source_uses_[source->index()] = CountSourceUses(source);

  } else if (destination->IsDoubleRegister()) {
    double_source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal