// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/x87/lithium-codegen-x87.h"
#include "src/x87/lithium-gap-resolver-x87.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


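// Resolve the parallel move: perform the non-constant moves first, resolving
// dependencies recursively and breaking cycles with swaps, then emit the
// moves from constant sources last.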
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
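  //
  // For example, resolving {eax -> ebx, ebx -> eax} marks the first move
  // as pending and recurses into the second, which finds the pending move
  // blocking its destination and emits an xchg. After the swap the first
  // move has become trivial and is simply removed.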

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so, remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


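// Add a move to the worklist and update the use counts of its register
// source and destination.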
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


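// Eliminate a move from the worklist and decrement the use counts of its
// register source and destination.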
void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


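// Count the unperformed moves whose source equals the given operand.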
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


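// Return a register, other than reg, whose current value is dead: it is the
// destination of some remaining move but the source of none, so it can be
// clobbered without spilling. Returns no_reg if there is no such register.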
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


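// Check that the resolver is back in its initial state: no moves left in
// the worklist, no register spilled, and all use counts at zero.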
bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

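// Pop the value of a register spilled by EnsureTempRegister, if any, and
// reset the worklist.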
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


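// If the operand is the register that was spilled to create a temp, restore
// its original value before the operand is read or written.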
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


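// Return a register usable as a temporary, spilling one to the stack if no
// register is free.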
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


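// Emit code for a single move and remove it from the worklist.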
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
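      // The X87 FPU has no instruction that loads a double immediate, so
      // materialize the constant on the stack and load it from memory.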
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // Store the X87 register to the destination, which on this non-SSE2
    // port is either a double stack slot or another X87 register.
    if (destination->IsDoubleStackSlot()) {
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    } else {
      X87Register dst = cgen_->ToX87Register(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    // Move from a double stack slot: either copy it to another double stack
    // slot word by word through a temporary register, or load it into the
    // destination X87 register.
    if (destination->IsDoubleStackSlot()) {
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      // Copy the two words of the double through tmp.
      __ mov(tmp, src0);
      __ mov(dst0, tmp);
      __ mov(tmp, src1);
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


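// Emit code that exchanges source and destination, which performs this move
// and breaks the cycle that made it pending.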
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
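      // No free register: exchange the register and the memory word in
      // place with the three-XOR swap trick.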
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
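      // XOR-swap tmp0 (holding dst) with src: src ends up with the old dst
      // value, and tmp0 with the old src value, which is written to dst.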
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs to have its source changed to
  // reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87