// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32), spilled_register_(-1) {

[William Hesse, 2011/01/17 10:43:43]
Can we use no_reg, or no_reg.code(), for no spilled register?

[Kevin Millikin (Chromium), 2011/01/17 11:18:17]
That seems like a subtle type error. This is an allocation index.

  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    source_uses_[i] = 0;
    destination_uses_[i] = 0;
  }
}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = moves_.length() - 1; i >= 0; --i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = moves_.length() - 1; i >= 0; --i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
}


void LGapResolver::PerformMove(int index) {

[fschneider, 2011/01/17 09:44:16]
Would it make the code more concise by passing a LMoveOperands*?

[Kevin Millikin (Chromium), 2011/01/17 10:13:09]
A little, but I'm nervous about passing addresses

  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
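  //
  // Example: for the cyclic parallel move {eax -> ebx, ebx -> eax},
  // performing the first move recurses into the second, which finds the
  // first move marked pending and breaks the cycle with an operand swap.
  // The swap rewrites the first move's source to ebx, so it degenerates
  // into the trivial move ebx -> ebx and is simply removed.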

  ASSERT(!moves_[index].IsRedundant());

[fschneider, 2011/01/17 09:44:16]
Can we ASSERT(!moves_[index].IsPending()) here?

[Kevin Millikin (Chromium), 2011/01/17 10:13:09]
I agree the predicates are confusing (one has to r


  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = moves_.length() - 1; i >= 0; --i) {

[fschneider, 2011/01/17 09:44:16]
Reason for iterating in reverse order? Maybe add a comment.

[Kevin Millikin (Chromium), 2011/01/17 10:13:09]
No reason, order is arbitrary. I'll just stick with it.

[William Hesse, 2011/01/17 10:43:43]
Do we have the source use counts at this point? I

[Kevin Millikin (Chromium), 2011/01/17 11:18:17]
We have source use counts for the general purpose registers.

    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = moves_.length() - 1; i >= 0; --i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {

[fschneider, 2011/01/17 09:44:16]
Maybe pass a LMoveOperands* here.

[Kevin Millikin (Chromium), 2011/01/17 10:13:09]
I'd rather not. We don't need to mutate it and it

  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move);
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = moves_.length() - 1; i >= 0; --i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);

[William Hesse, 2011/01/17 10:43:43]
Shouldn't we have a List::Clear for Rewind(0), and

[Kevin Millikin (Chromium), 2011/01/17 11:18:17]
We have List::Clear but it does something different.

}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

[William Hesse, 2011/01/17 10:43:43]
Are only combinations prohibited by the double vs

[Kevin Millikin (Chromium), 2011/01/17 11:18:17]
The first.

  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Immediate src = cgen_->ToImmediate(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsDoubleRegister()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister src = cgen_->ToDoubleRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movdbl(dst, src);

  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movdbl(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movdbl(xmm0, src);
      __ movdbl(dst, xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point. Memory-to-memory swaps are a rare case.

[William Hesse, 2011/01/17 10:43:43]
Why is the memory-to-memory comment here?

    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
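      // No free temp register: exchange reg and mem in place with the
      // three-XOR trick (a ^= b; b ^= a; a ^= b leaves the two values
      // swapped without using a scratch location).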
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
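      // Copy dst into tmp0, XOR-swap tmp0 with src in memory (after the
      // three XORs, src holds the old dst and tmp0 the old src), then
      // store the old src from tmp0 to dst.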
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-register or register-memory. We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movdbl(xmm0, other);
    __ movdbl(other, reg);
    __ movdbl(reg, Operand(xmm0));

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory. Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
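    // A double occupies two adjacent 32-bit stack slots on ia32: src0/dst0
    // address the low words and src1/dst1 the high words of the two values.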
    __ movdbl(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movdbl(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
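  // For example, after swapping the contents of eax and ebx, a move that
  // was going to read eax must now read ebx (and vice versa), since each
  // value now lives in the other location.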
  for (int j = moves_.length() - 1; j >= 0; --j) {
    LMoveOperands other_move = moves_[j];
    if (other_move.Blocks(source)) {
      moves_[j].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[j].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal