// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
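
For concreteness, here is a minimal sketch of the ordering the two passes above produce for a hypothetical gap containing the moves rbx <- rax and rax <- #1 (the register names and constant are invented for illustration, not taken from this patch):

  // Pass 1 performs the non-constant move while rax still holds the value
  // the move needs; nothing ever blocks on a constant source, so the
  // constant move can always be deferred.
  __ movq(rbx, rax);           // rbx <- rax
  // Pass 2 materializes the constant, clobbering rax only after its old
  // value has been consumed.
  __ movl(rax, Immediate(1));  // rax <- #1

Emitting the constant first would have clobbered rax while the rbx <- rax move still needed it; skipping constants in the first pass also keeps their destination registers free for the whole algorithm, as the comment above notes.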


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // This move may be blocked on at most one pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap
  // to resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
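
To watch the pending-mark and swap machinery in isolation, here is a minimal, self-contained sketch of the same algorithm over a plain integer register file. All names in it are invented for illustration; a pending move is modeled by clearing its destination to -1, just as the code above clears it to NULL:

  #include <cstdio>
  #include <utility>
  #include <vector>

  struct Move { int src; int dst; bool done; };

  static std::vector<int> regs = {10, 20, 30};  // register file r0..r2
  static std::vector<Move> moves;

  static void PerformMove(int index) {
    // Mark this move pending by clearing its destination.
    int dst = moves[index].dst;
    moves[index].dst = -1;

    // Depth-first: perform every unperformed, unpending move that reads
    // our destination before we are allowed to overwrite it.
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (!moves[i].done && moves[i].dst != -1 && moves[i].src == dst) {
        PerformMove(i);
      }
    }
    moves[index].dst = dst;

    // A swap during the recursion may have already routed our value home.
    if (moves[index].src == dst) {
      moves[index].done = true;
      return;
    }

    // Any move still reading our destination must be pending: a cycle.
    // Break it with a swap and retarget affected sources, as EmitSwap does.
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (!moves[i].done && moves[i].src == dst) {
        int src = moves[index].src;
        std::swap(regs[src], regs[dst]);
        moves[index].done = true;
        for (Move& m : moves) {
          if (m.done) continue;
          if (m.src == src) m.src = dst;
          else if (m.src == dst) m.src = src;
        }
        return;
      }
    }

    // Not blocked: a plain move.
    regs[dst] = regs[moves[index].src];
    moves[index].done = true;
  }

  int main() {
    moves = {{0, 1, false}, {1, 0, false}};  // the 2-cycle r0 <-> r1
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (!moves[i].done) PerformMove(i);
    }
    std::printf("r0=%d r1=%d\n", regs[0], regs[1]);  // prints r0=20 r1=10
    return 0;
  }

The cycle is resolved with a single swap and no extra temporary, which is exactly why EmitSwap below must retarget the sources of the remaining moves afterwards.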


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(kScratchRegister, src);
      __ movq(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsInteger32Constant(constant_source)) {
        // Allow top 32 bits of an untagged Integer32 to be arbitrary.
        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    }

Kevin Millikin (Chromium) 2011/01/24 14:57:12:
I guess there should be a blank line after this one.

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.

Kevin Millikin (Chromium) 2011/01/24 14:57:12:
Update the comment so it doesn't mention the spill…

William Hesse 2011/01/26 10:05:57:
Done.

    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movq(kScratchRegister, mem);
    __ movq(mem, reg);
    __ movq(reg, kScratchRegister);
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {

Kevin Millikin (Chromium) 2011/01/24 14:57:12:
This case could be
  } else if ((source->IsStackSlo…

William Hesse 2011/01/26 10:05:57:
Done.

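The cut-off suggestion above appears to propose folding the double-width memory-to-memory case further down into this arm: both kinds of stack slot are 64 bits wide on x64, so the same scratch-register shuffle serves either. A sketch of the merged arm, assuming that reading of the comment:

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory. Swap 64 bits at a time through the two fixed
    // scratch registers, xmm0 and kScratchRegister.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movq(src, kScratchRegister);

The original, unmerged arm continues below.
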
    // Memory-memory.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movq(src, kScratchRegister);
  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-register or register-memory. We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    if (other->IsDoubleRegister()) {

Kevin Millikin (Chromium) 2011/01/24 14:57:12:
I think this case (double register/double register…

William Hesse 2011/01/26 10:05:57:
Done.

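This comment is also truncated; one plausible reading, given that xmm0 is reserved as a fixed scratch register, is that the double-register/double-register pair could be split into its own arm instead of sharing the register/memory path. A sketch under that assumption:

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two XMM registers through the fixed xmm0 scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);

The original code resumes below.
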
      XMMRegister other_reg = cgen_->ToDoubleRegister(other);
      __ movsd(xmm0, other_reg);
      __ movsd(other_reg, reg);
      __ movsd(reg, xmm0);
    } else {
      ASSERT(other->IsDoubleStackSlot());
      Operand other_operand = cgen_->ToOperand(other);
      __ movsd(xmm0, other_operand);
      __ movsd(other_operand, reg);
      __ movsd(reg, xmm0);
    }
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory. Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.

Kevin Millikin (Chromium) 2011/01/24 14:57:12:
If you don't move this to share the other memory/m…

William Hesse 2011/01/26 10:05:57:
Done.

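This truncated comment looks like the same point as the one on the stack-slot/stack-slot arm above: the instruction sequence here is identical to that arm's, so the two memory-to-memory cases could share a single arm (see the merged-arm sketch after that earlier comment).
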
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, dst);
    __ movq(kScratchRegister, src);
    __ movsd(src, xmm0);
    __ movq(dst, kScratchRegister);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed move (including a pending one) whose source is either
  // this move's source or its destination needs its source changed to
  // reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal