| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_IA32 |
| 8 | 8 |
| 9 #include "src/ia32/lithium-codegen-ia32.h" | 9 #include "src/ia32/lithium-codegen-ia32.h" |
| 10 #include "src/ia32/lithium-gap-resolver-ia32.h" | 10 #include "src/ia32/lithium-gap-resolver-ia32.h" |
| 11 | 11 |
| 12 namespace v8 { | 12 namespace v8 { |
| 13 namespace internal { | 13 namespace internal { |
| 14 | 14 |
| 15 LGapResolver::LGapResolver(LCodeGen* owner) | 15 LGapResolver::LGapResolver(LCodeGen* owner) |
| 16 : cgen_(owner), | 16 : cgen_(owner), |
| 17 moves_(32, owner->zone()), | 17 moves_(32, owner->zone()), |
| 18 source_uses_(), | 18 source_uses_(), |
| 19 destination_uses_(), | 19 destination_uses_(), |
| 20 spilled_register_(-1) {} | 20 spilled_register_(-1) {} |
| 21 | 21 |
| 22 | 22 |
| 23 void LGapResolver::Resolve(LParallelMove* parallel_move) { | 23 void LGapResolver::Resolve(LParallelMove* parallel_move) { |
| 24 ASSERT(HasBeenReset()); | 24 DCHECK(HasBeenReset()); |
| 25 // Build up a worklist of moves. | 25 // Build up a worklist of moves. |
| 26 BuildInitialMoveList(parallel_move); | 26 BuildInitialMoveList(parallel_move); |
| 27 | 27 |
| 28 for (int i = 0; i < moves_.length(); ++i) { | 28 for (int i = 0; i < moves_.length(); ++i) { |
| 29 LMoveOperands move = moves_[i]; | 29 LMoveOperands move = moves_[i]; |
| 30 // Skip constants to perform them last. They don't block other moves | 30 // Skip constants to perform them last. They don't block other moves |
| 31 // and skipping such moves with register destinations keeps those | 31 // and skipping such moves with register destinations keeps those |
| 32 // registers free for the whole algorithm. | 32 // registers free for the whole algorithm. |
| 33 if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { | 33 if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { |
| 34 PerformMove(i); | 34 PerformMove(i); |
| 35 } | 35 } |
| 36 } | 36 } |
| 37 | 37 |
| 38 // Perform the moves with constant sources. | 38 // Perform the moves with constant sources. |
| 39 for (int i = 0; i < moves_.length(); ++i) { | 39 for (int i = 0; i < moves_.length(); ++i) { |
| 40 if (!moves_[i].IsEliminated()) { | 40 if (!moves_[i].IsEliminated()) { |
| 41 ASSERT(moves_[i].source()->IsConstantOperand()); | 41 DCHECK(moves_[i].source()->IsConstantOperand()); |
| 42 EmitMove(i); | 42 EmitMove(i); |
| 43 } | 43 } |
| 44 } | 44 } |
| 45 | 45 |
| 46 Finish(); | 46 Finish(); |
| 47 ASSERT(HasBeenReset()); | 47 DCHECK(HasBeenReset()); |
| 48 } | 48 } |
| 49 | 49 |
| 50 | 50 |
| 51 void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { | 51 void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { |
| 52 // Perform a linear sweep of the moves to add them to the initial list of | 52 // Perform a linear sweep of the moves to add them to the initial list of |
| 53 // moves to perform, ignoring any move that is redundant (the source is | 53 // moves to perform, ignoring any move that is redundant (the source is |
| 54 // the same as the destination, the destination is ignored and | 54 // the same as the destination, the destination is ignored and |
| 55 // unallocated, or the move was already eliminated). | 55 // unallocated, or the move was already eliminated). |
| 56 const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); | 56 const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); |
| 57 for (int i = 0; i < moves->length(); ++i) { | 57 for (int i = 0; i < moves->length(); ++i) { |
| 58 LMoveOperands move = moves->at(i); | 58 LMoveOperands move = moves->at(i); |
| 59 if (!move.IsRedundant()) AddMove(move); | 59 if (!move.IsRedundant()) AddMove(move); |
| 60 } | 60 } |
| 61 Verify(); | 61 Verify(); |
| 62 } | 62 } |
| 63 | 63 |
| 64 | 64 |
| 65 void LGapResolver::PerformMove(int index) { | 65 void LGapResolver::PerformMove(int index) { |
| 66 // Each call to this function performs a move and deletes it from the move | 66 // Each call to this function performs a move and deletes it from the move |
| 67 // graph. We first recursively perform any move blocking this one. We | 67 // graph. We first recursively perform any move blocking this one. We |
| 68 // mark a move as "pending" on entry to PerformMove in order to detect | 68 // mark a move as "pending" on entry to PerformMove in order to detect |
| 69 // cycles in the move graph. We use operand swaps to resolve cycles, | 69 // cycles in the move graph. We use operand swaps to resolve cycles, |
| 70 // which means that a call to PerformMove could change any source operand | 70 // which means that a call to PerformMove could change any source operand |
| 71 // in the move graph. | 71 // in the move graph. |
| 72 | 72 |
| 73 ASSERT(!moves_[index].IsPending()); | 73 DCHECK(!moves_[index].IsPending()); |
| 74 ASSERT(!moves_[index].IsRedundant()); | 74 DCHECK(!moves_[index].IsRedundant()); |
| 75 | 75 |
| 76 // Clear this move's destination to indicate a pending move. The actual | 76 // Clear this move's destination to indicate a pending move. The actual |
| 77 // destination is saved on the side. | 77 // destination is saved on the side. |
| 78 ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. | 78 DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated. |
| 79 LOperand* destination = moves_[index].destination(); | 79 LOperand* destination = moves_[index].destination(); |
| 80 moves_[index].set_destination(NULL); | 80 moves_[index].set_destination(NULL); |
| 81 | 81 |
| 82 // Perform a depth-first traversal of the move graph to resolve | 82 // Perform a depth-first traversal of the move graph to resolve |
| 83 // dependencies. Any unperformed, unpending move with a source the same | 83 // dependencies. Any unperformed, unpending move with a source the same |
| 84 // as this one's destination blocks this one so recursively perform all | 84 // as this one's destination blocks this one so recursively perform all |
| 85 // such moves. | 85 // such moves. |
| 86 for (int i = 0; i < moves_.length(); ++i) { | 86 for (int i = 0; i < moves_.length(); ++i) { |
| 87 LMoveOperands other_move = moves_[i]; | 87 LMoveOperands other_move = moves_[i]; |
| 88 if (other_move.Blocks(destination) && !other_move.IsPending()) { | 88 if (other_move.Blocks(destination) && !other_move.IsPending()) { |
| (...skipping 20 matching lines...) |
| 109 RemoveMove(index); | 109 RemoveMove(index); |
| 110 return; | 110 return; |
| 111 } | 111 } |
| 112 | 112 |
| 113 // The move may be blocked on a (at most one) pending move, in which case | 113 // The move may be blocked on a (at most one) pending move, in which case |
| 114 // we have a cycle. Search for such a blocking move and perform a swap to | 114 // we have a cycle. Search for such a blocking move and perform a swap to |
| 115 // resolve it. | 115 // resolve it. |
| 116 for (int i = 0; i < moves_.length(); ++i) { | 116 for (int i = 0; i < moves_.length(); ++i) { |
| 117 LMoveOperands other_move = moves_[i]; | 117 LMoveOperands other_move = moves_[i]; |
| 118 if (other_move.Blocks(destination)) { | 118 if (other_move.Blocks(destination)) { |
| 119 ASSERT(other_move.IsPending()); | 119 DCHECK(other_move.IsPending()); |
| 120 EmitSwap(index); | 120 EmitSwap(index); |
| 121 return; | 121 return; |
| 122 } | 122 } |
| 123 } | 123 } |
| 124 | 124 |
| 125 // This move is not blocked. | 125 // This move is not blocked. |
| 126 EmitMove(index); | 126 EmitMove(index); |
| 127 } | 127 } |
| 128 | 128 |
| 129 | 129 |
| 130 void LGapResolver::AddMove(LMoveOperands move) { | 130 void LGapResolver::AddMove(LMoveOperands move) { |
| 131 LOperand* source = move.source(); | 131 LOperand* source = move.source(); |
| 132 if (source->IsRegister()) ++source_uses_[source->index()]; | 132 if (source->IsRegister()) ++source_uses_[source->index()]; |
| 133 | 133 |
| 134 LOperand* destination = move.destination(); | 134 LOperand* destination = move.destination(); |
| 135 if (destination->IsRegister()) ++destination_uses_[destination->index()]; | 135 if (destination->IsRegister()) ++destination_uses_[destination->index()]; |
| 136 | 136 |
| 137 moves_.Add(move, cgen_->zone()); | 137 moves_.Add(move, cgen_->zone()); |
| 138 } | 138 } |
| 139 | 139 |
| 140 | 140 |
| 141 void LGapResolver::RemoveMove(int index) { | 141 void LGapResolver::RemoveMove(int index) { |
| 142 LOperand* source = moves_[index].source(); | 142 LOperand* source = moves_[index].source(); |
| 143 if (source->IsRegister()) { | 143 if (source->IsRegister()) { |
| 144 --source_uses_[source->index()]; | 144 --source_uses_[source->index()]; |
| 145 ASSERT(source_uses_[source->index()] >= 0); | 145 DCHECK(source_uses_[source->index()] >= 0); |
| 146 } | 146 } |
| 147 | 147 |
| 148 LOperand* destination = moves_[index].destination(); | 148 LOperand* destination = moves_[index].destination(); |
| 149 if (destination->IsRegister()) { | 149 if (destination->IsRegister()) { |
| 150 --destination_uses_[destination->index()]; | 150 --destination_uses_[destination->index()]; |
| 151 ASSERT(destination_uses_[destination->index()] >= 0); | 151 DCHECK(destination_uses_[destination->index()] >= 0); |
| 152 } | 152 } |
| 153 | 153 |
| 154 moves_[index].Eliminate(); | 154 moves_[index].Eliminate(); |
| 155 } | 155 } |
| 156 | 156 |
| 157 | 157 |
| 158 int LGapResolver::CountSourceUses(LOperand* operand) { | 158 int LGapResolver::CountSourceUses(LOperand* operand) { |
| 159 int count = 0; | 159 int count = 0; |
| 160 for (int i = 0; i < moves_.length(); ++i) { | 160 for (int i = 0; i < moves_.length(); ++i) { |
| 161 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { | 161 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { |
| (...skipping 21 matching lines...) |
| 183 | 183 |
| 184 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { | 184 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { |
| 185 if (source_uses_[i] != 0) return false; | 185 if (source_uses_[i] != 0) return false; |
| 186 if (destination_uses_[i] != 0) return false; | 186 if (destination_uses_[i] != 0) return false; |
| 187 } | 187 } |
| 188 return true; | 188 return true; |
| 189 } | 189 } |
| 190 | 190 |
| 191 | 191 |
| 192 void LGapResolver::Verify() { | 192 void LGapResolver::Verify() { |
| 193 #ifdef ENABLE_SLOW_ASSERTS | 193 #ifdef ENABLE_SLOW_DCHECKS |
| 194 // No operand should be the destination for more than one move. | 194 // No operand should be the destination for more than one move. |
| 195 for (int i = 0; i < moves_.length(); ++i) { | 195 for (int i = 0; i < moves_.length(); ++i) { |
| 196 LOperand* destination = moves_[i].destination(); | 196 LOperand* destination = moves_[i].destination(); |
| 197 for (int j = i + 1; j < moves_.length(); ++j) { | 197 for (int j = i + 1; j < moves_.length(); ++j) { |
| 198 SLOW_ASSERT(!destination->Equals(moves_[j].destination())); | 198 SLOW_DCHECK(!destination->Equals(moves_[j].destination())); |
| 199 } | 199 } |
| 200 } | 200 } |
| 201 #endif | 201 #endif |
| 202 } | 202 } |
| 203 | 203 |
| 204 | 204 |
| 205 #define __ ACCESS_MASM(cgen_->masm()) | 205 #define __ ACCESS_MASM(cgen_->masm()) |
| 206 | 206 |
| 207 void LGapResolver::Finish() { | 207 void LGapResolver::Finish() { |
| 208 if (spilled_register_ >= 0) { | 208 if (spilled_register_ >= 0) { |
| (...skipping 43 matching lines...) |
| 252 | 252 |
| 253 void LGapResolver::EmitMove(int index) { | 253 void LGapResolver::EmitMove(int index) { |
| 254 LOperand* source = moves_[index].source(); | 254 LOperand* source = moves_[index].source(); |
| 255 LOperand* destination = moves_[index].destination(); | 255 LOperand* destination = moves_[index].destination(); |
| 256 EnsureRestored(source); | 256 EnsureRestored(source); |
| 257 EnsureRestored(destination); | 257 EnsureRestored(destination); |
| 258 | 258 |
| 259 // Dispatch on the source and destination operand kinds. Not all | 259 // Dispatch on the source and destination operand kinds. Not all |
| 260 // combinations are possible. | 260 // combinations are possible. |
| 261 if (source->IsRegister()) { | 261 if (source->IsRegister()) { |
| 262 ASSERT(destination->IsRegister() || destination->IsStackSlot()); | 262 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 263 Register src = cgen_->ToRegister(source); | 263 Register src = cgen_->ToRegister(source); |
| 264 Operand dst = cgen_->ToOperand(destination); | 264 Operand dst = cgen_->ToOperand(destination); |
| 265 __ mov(dst, src); | 265 __ mov(dst, src); |
| 266 | 266 |
| 267 } else if (source->IsStackSlot()) { | 267 } else if (source->IsStackSlot()) { |
| 268 ASSERT(destination->IsRegister() || destination->IsStackSlot()); | 268 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 269 Operand src = cgen_->ToOperand(source); | 269 Operand src = cgen_->ToOperand(source); |
| 270 if (destination->IsRegister()) { | 270 if (destination->IsRegister()) { |
| 271 Register dst = cgen_->ToRegister(destination); | 271 Register dst = cgen_->ToRegister(destination); |
| 272 __ mov(dst, src); | 272 __ mov(dst, src); |
| 273 } else { | 273 } else { |
| 274 // Spill on demand to use a temporary register for memory-to-memory | 274 // Spill on demand to use a temporary register for memory-to-memory |
| 275 // moves. | 275 // moves. |
| 276 Register tmp = EnsureTempRegister(); | 276 Register tmp = EnsureTempRegister(); |
| 277 Operand dst = cgen_->ToOperand(destination); | 277 Operand dst = cgen_->ToOperand(destination); |
| 278 __ mov(tmp, src); | 278 __ mov(tmp, src); |
| (...skipping 19 matching lines...) |
| 298 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 298 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 299 if (int_val == 0) { | 299 if (int_val == 0) { |
| 300 __ xorps(dst, dst); | 300 __ xorps(dst, dst); |
| 301 } else { | 301 } else { |
| 302 __ push(Immediate(upper)); | 302 __ push(Immediate(upper)); |
| 303 __ push(Immediate(lower)); | 303 __ push(Immediate(lower)); |
| 304 __ movsd(dst, Operand(esp, 0)); | 304 __ movsd(dst, Operand(esp, 0)); |
| 305 __ add(esp, Immediate(kDoubleSize)); | 305 __ add(esp, Immediate(kDoubleSize)); |
| 306 } | 306 } |
| 307 } else { | 307 } else { |
| 308 ASSERT(destination->IsStackSlot()); | 308 DCHECK(destination->IsStackSlot()); |
| 309 Operand dst = cgen_->ToOperand(destination); | 309 Operand dst = cgen_->ToOperand(destination); |
| 310 Representation r = cgen_->IsSmi(constant_source) | 310 Representation r = cgen_->IsSmi(constant_source) |
| 311 ? Representation::Smi() : Representation::Integer32(); | 311 ? Representation::Smi() : Representation::Integer32(); |
| 312 if (cgen_->IsInteger32(constant_source)) { | 312 if (cgen_->IsInteger32(constant_source)) { |
| 313 __ Move(dst, cgen_->ToImmediate(constant_source, r)); | 313 __ Move(dst, cgen_->ToImmediate(constant_source, r)); |
| 314 } else { | 314 } else { |
| 315 Register tmp = EnsureTempRegister(); | 315 Register tmp = EnsureTempRegister(); |
| 316 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); | 316 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); |
| 317 __ mov(dst, tmp); | 317 __ mov(dst, tmp); |
| 318 } | 318 } |
| 319 } | 319 } |
| 320 | 320 |
| 321 } else if (source->IsDoubleRegister()) { | 321 } else if (source->IsDoubleRegister()) { |
| 322 XMMRegister src = cgen_->ToDoubleRegister(source); | 322 XMMRegister src = cgen_->ToDoubleRegister(source); |
| 323 if (destination->IsDoubleRegister()) { | 323 if (destination->IsDoubleRegister()) { |
| 324 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 324 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 325 __ movaps(dst, src); | 325 __ movaps(dst, src); |
| 326 } else { | 326 } else { |
| 327 ASSERT(destination->IsDoubleStackSlot()); | 327 DCHECK(destination->IsDoubleStackSlot()); |
| 328 Operand dst = cgen_->ToOperand(destination); | 328 Operand dst = cgen_->ToOperand(destination); |
| 329 __ movsd(dst, src); | 329 __ movsd(dst, src); |
| 330 } | 330 } |
| 331 } else if (source->IsDoubleStackSlot()) { | 331 } else if (source->IsDoubleStackSlot()) { |
| 332 ASSERT(destination->IsDoubleRegister() || | 332 DCHECK(destination->IsDoubleRegister() || |
| 333 destination->IsDoubleStackSlot()); | 333 destination->IsDoubleStackSlot()); |
| 334 Operand src = cgen_->ToOperand(source); | 334 Operand src = cgen_->ToOperand(source); |
| 335 if (destination->IsDoubleRegister()) { | 335 if (destination->IsDoubleRegister()) { |
| 336 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 336 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 337 __ movsd(dst, src); | 337 __ movsd(dst, src); |
| 338 } else { | 338 } else { |
| 339 // We rely on having xmm0 available as a fixed scratch register. | 339 // We rely on having xmm0 available as a fixed scratch register. |
| 340 Operand dst = cgen_->ToOperand(destination); | 340 Operand dst = cgen_->ToOperand(destination); |
| 341 __ movsd(xmm0, src); | 341 __ movsd(xmm0, src); |
| 342 __ movsd(dst, xmm0); | 342 __ movsd(dst, xmm0); |
| (...skipping 64 matching lines...) |
| 407 // XMM register-register swap. We rely on having xmm0 | 407 // XMM register-register swap. We rely on having xmm0 |
| 408 // available as a fixed scratch register. | 408 // available as a fixed scratch register. |
| 409 XMMRegister src = cgen_->ToDoubleRegister(source); | 409 XMMRegister src = cgen_->ToDoubleRegister(source); |
| 410 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 410 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 411 __ movaps(xmm0, src); | 411 __ movaps(xmm0, src); |
| 412 __ movaps(src, dst); | 412 __ movaps(src, dst); |
| 413 __ movaps(dst, xmm0); | 413 __ movaps(dst, xmm0); |
| 414 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { | 414 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { |
| 415 // XMM register-memory swap. We rely on having xmm0 | 415 // XMM register-memory swap. We rely on having xmm0 |
| 416 // available as a fixed scratch register. | 416 // available as a fixed scratch register. |
| 417 ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); | 417 DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); |
| 418 XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() | 418 XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister() |
| 419 ? source | 419 ? source |
| 420 : destination); | 420 : destination); |
| 421 Operand other = | 421 Operand other = |
| 422 cgen_->ToOperand(source->IsDoubleRegister() ? destination : source); | 422 cgen_->ToOperand(source->IsDoubleRegister() ? destination : source); |
| 423 __ movsd(xmm0, other); | 423 __ movsd(xmm0, other); |
| 424 __ movsd(other, reg); | 424 __ movsd(other, reg); |
| 425 __ movaps(reg, xmm0); | 425 __ movaps(reg, xmm0); |
| 426 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { | 426 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { |
| 427 // Double-width memory-to-memory. Spill on demand to use a general | 427 // Double-width memory-to-memory. Spill on demand to use a general |
| (...skipping 45 matching lines...) |
| 473 } else if (destination->IsRegister()) { | 473 } else if (destination->IsRegister()) { |
| 474 source_uses_[destination->index()] = CountSourceUses(destination); | 474 source_uses_[destination->index()] = CountSourceUses(destination); |
| 475 } | 475 } |
| 476 } | 476 } |
| 477 | 477 |
| 478 #undef __ | 478 #undef __ |
| 479 | 479 |
| 480 } } // namespace v8::internal | 480 } } // namespace v8::internal |
| 481 | 481 |
| 482 #endif // V8_TARGET_ARCH_IA32 | 482 #endif // V8_TARGET_ARCH_IA32 |
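
The change itself is mechanical: every `ASSERT`/`SLOW_ASSERT` in the file becomes `DCHECK`/`SLOW_DCHECK`, and `ENABLE_SLOW_ASSERTS` becomes `ENABLE_SLOW_DCHECKS`, aligning V8's debug-check macros with Chromium naming; behavior is unchanged. As a refresher, the sketch below shows the usual semantics of a DCHECK-style macro. It is an illustrative definition under an assumed `DEBUG` flag, not V8's actual one.

```cpp
#include <cstdio>
#include <cstdlib>

// Debug-only check: evaluates and aborts on failure in debug builds,
// compiles away entirely in release builds.
#ifdef DEBUG
#define DCHECK(condition)                                             \
  do {                                                                \
    if (!(condition)) {                                               \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition);   \
      std::abort();                                                   \
    }                                                                 \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif
```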
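For context on what the file under review does: LGapResolver turns a parallel move (all moves conceptually simultaneous) into a sequence of machine moves, doing a depth-first traversal of the dependency graph and breaking cycles with swaps, exactly as the comments in PerformMove describe. The sketch below is a minimal model of that control flow, assuming plain integer locations and a flat value array in place of LOperands; it omits the constant-source pass, the register-use counting, and the spilling logic, and all names are hypothetical rather than V8 API.

```cpp
#include <cstdio>
#include <utility>
#include <vector>

struct Move {
  int src = -1;  // -1 marks the move as eliminated
  int dst = -1;  // cleared to -1 while the move is "pending"
};

struct GapResolver {
  std::vector<int>& state;  // contents of each location
  std::vector<Move> moves;

  void Resolve() {
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].src != -1) PerformMove(i);
    }
  }

  void PerformMove(size_t index) {
    // Mark as pending by clearing the destination; save it on the side.
    int destination = moves[index].dst;
    moves[index].dst = -1;

    // Depth-first: any unperformed, unpending move reading from our
    // destination blocks us and must be performed first.
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].src == destination && moves[i].dst != -1) {
        PerformMove(i);
      }
    }

    moves[index].dst = destination;  // no longer pending

    // A swap during the traversal may have made this move redundant.
    if (moves[index].src == destination) {
      moves[index] = Move{};
      return;
    }

    // A move that still blocks us must be pending: we found a cycle.
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].src == destination) {
        EmitSwap(index);
        return;
      }
    }

    // Not blocked: perform the move.
    state[destination] = state[moves[index].src];
    moves[index] = Move{};
  }

  void EmitSwap(size_t index) {
    int src = moves[index].src;
    int dst = moves[index].dst;
    std::swap(state[src], state[dst]);
    moves[index] = Move{};  // the swap has performed this move

    // Other moves reading from a swapped location must be redirected.
    for (Move& m : moves) {
      if (m.src == src) {
        m.src = dst;
      } else if (m.src == dst) {
        m.src = src;
      }
    }
  }
};

int main() {
  // A three-way cycle: 0 -> 1 -> 2 -> 0.
  std::vector<int> state = {10, 20, 30};
  GapResolver resolver{state, {{0, 1}, {1, 2}, {2, 0}}};
  resolver.Resolve();
  std::printf("%d %d %d\n", state[0], state[1], state[2]);  // 30 10 20
  return 0;
}
```

The swap-and-redirect step in EmitSwap mirrors the real resolver's trick: a swap performs the current move and relocates the other operand, so any outstanding move whose source was one of the swapped locations must have its source rewritten before the traversal continues.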