OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 173 matching lines...)
184 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { | 184 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { |
185 ++count; | 185 ++count; |
186 } | 186 } |
187 } | 187 } |
188 return count; | 188 return count; |
189 } | 189 } |
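These per-register use counts drive everything below: the loop above tallies how many pending, non-eliminated moves still read a given operand as their source. A standalone analogue with simplified types (illustrative stand-ins, not the V8 LMoveOperands API):

    #include <vector>

    // Simplified stand-in for a pending move; operands reduced to ints.
    struct Move {
      int source;
      int destination;
      bool eliminated;
    };

    // Analogue of the counting loop above: how many pending,
    // non-eliminated moves still read `operand` as their source.
    int CountUses(const std::vector<Move>& moves, int operand) {
      int count = 0;
      for (const Move& m : moves) {
        if (!m.eliminated && m.source == operand) ++count;
      }
      return count;
    }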
190 | 190 |
191 | 191 |
192 Register LGapResolver::GetFreeRegisterNot(Register reg) { | 192 Register LGapResolver::GetFreeRegisterNot(Register reg) { |
193 int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); | 193 int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); |
194 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { | 194 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { |
195 if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { | 195 if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { |
196 return Register::FromAllocationIndex(i); | 196 return Register::FromAllocationIndex(i); |
197 } | 197 } |
198 } | 198 } |
199 return no_reg; | 199 return no_reg; |
200 } | 200 } |
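For context, a register counts as "free" here when no pending move still reads it (source_uses_[i] == 0) but some pending move is about to overwrite it (destination_uses_[i] > 0): its current value is dead, so it can be clobbered without a spill. A minimal standalone sketch of that selection rule (simplified indices in place of the V8 Register API):

    // Returns the index of a register that is safe to clobber, or -1.
    // A register qualifies when nothing still reads it but a pending
    // move will overwrite it anyway.
    int FindFreeRegister(const int source_uses[], const int destination_uses[],
                         int num_regs, int skip_index) {
      for (int i = 0; i < num_regs; ++i) {
        if (source_uses[i] == 0 && destination_uses[i] > 0 && i != skip_index) {
          return i;
        }
      }
      return -1;  // Caller falls back to spilling (see EnsureTempRegister).
    }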
201 | 201 |
202 | 202 |
203 bool LGapResolver::HasBeenReset() { | 203 bool LGapResolver::HasBeenReset() { |
204 if (!moves_.is_empty()) return false; | 204 if (!moves_.is_empty()) return false; |
205 if (spilled_register_ >= 0) return false; | 205 if (spilled_register_ >= 0) return false; |
206 | 206 |
207 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { | 207 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { |
208 if (source_uses_[i] != 0) return false; | 208 if (source_uses_[i] != 0) return false; |
209 if (destination_uses_[i] != 0) return false; | 209 if (destination_uses_[i] != 0) return false; |
210 } | 210 } |
211 return true; | 211 return true; |
212 } | 212 } |
213 | 213 |
214 | 214 |
215 void LGapResolver::Verify() { | 215 void LGapResolver::Verify() { |
216 #ifdef ENABLE_SLOW_ASSERTS | 216 #ifdef ENABLE_SLOW_ASSERTS |
217 // No operand should be the destination for more than one move. | 217 // No operand should be the destination for more than one move. |
(...skipping 31 matching lines...)
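The invariant named in the comment above (no operand is the destination of more than one move) is what makes a parallel move well-defined: each destination is written exactly once, so pending moves can be reordered or broken into swaps safely. The body of Verify() is elided in this diff; a minimal standalone illustration of such a pairwise check (simplified types, not the actual V8 code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustration only: destination operands reduced to plain ints.
    void VerifyNoDuplicateDestinations(const std::vector<int>& destinations) {
      for (std::size_t i = 0; i < destinations.size(); ++i) {
        for (std::size_t j = i + 1; j < destinations.size(); ++j) {
          // No two pending moves may target the same operand.
          assert(destinations[i] != destinations[j]);
        }
      }
    }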
249 if (spilled_register_ >= 0) { | 249 if (spilled_register_ >= 0) { |
250 return Register::FromAllocationIndex(spilled_register_); | 250 return Register::FromAllocationIndex(spilled_register_); |
251 } | 251 } |
252 | 252 |
253 // 2. We may have a free register that we can use without spilling. | 253 // 2. We may have a free register that we can use without spilling. |
254 Register free = GetFreeRegisterNot(no_reg); | 254 Register free = GetFreeRegisterNot(no_reg); |
255 if (!free.is(no_reg)) return free; | 255 if (!free.is(no_reg)) return free; |
256 | 256 |
257 // 3. Prefer to spill a register that is not used in any remaining move | 257 // 3. Prefer to spill a register that is not used in any remaining move |
258 // because it will not need to be restored until the end. | 258 // because it will not need to be restored until the end. |
259 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { | 259 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { |
260 if (source_uses_[i] == 0 && destination_uses_[i] == 0) { | 260 if (source_uses_[i] == 0 && destination_uses_[i] == 0) { |
261 Register scratch = Register::FromAllocationIndex(i); | 261 Register scratch = Register::FromAllocationIndex(i); |
262 __ push(scratch); | 262 __ push(scratch); |
263 spilled_register_ = i; | 263 spilled_register_ = i; |
264 return scratch; | 264 return scratch; |
265 } | 265 } |
266 } | 266 } |
267 | 267 |
268 // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. | 268 // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. |
269 Register scratch = Register::FromAllocationIndex(0); | 269 Register scratch = Register::FromAllocationIndex(0); |
(...skipping 47 matching lines...)
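Steps 1 through 4 above form a cheapest-first cascade for obtaining a temp register: reuse one that is already spilled, else take a dead one for free, else spill one that no pending move touches (it only needs restoring once, at the end), and only as a last resort spill an arbitrary register. A condensed sketch of that decision order, reusing FindFreeRegister from the earlier sketch (illustrative only; the push/restore of the actual machine register, and the step-4 tail elided in this diff, are left out):

    // Returns a register index; records a spill in *spilled when one
    // is needed so the caller can restore it later.
    int EnsureTempSketch(int* spilled, const int source_uses[],
                         const int destination_uses[], int num_regs) {
      if (*spilled >= 0) return *spilled;        // 1. Reuse the prior spill.
      int free = FindFreeRegister(source_uses, destination_uses, num_regs, -1);
      if (free >= 0) return free;                // 2. A dead register is free.
      for (int i = 0; i < num_regs; ++i) {       // 3. A register no move touches:
        if (source_uses[i] == 0 && destination_uses[i] == 0) {
          *spilled = i;                          //    spill it, restore at the end.
          return i;
        }
      }
      *spilled = 0;                              // 4. Arbitrary fallback: register 0.
      return 0;
    }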
317 if (cgen_->IsInteger32(constant_source)) { | 317 if (cgen_->IsInteger32(constant_source)) { |
318 __ Set(dst, cgen_->ToInteger32Immediate(constant_source)); | 318 __ Set(dst, cgen_->ToInteger32Immediate(constant_source)); |
319 } else { | 319 } else { |
320 Register tmp = EnsureTempRegister(); | 320 Register tmp = EnsureTempRegister(); |
321 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); | 321 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); |
322 __ mov(dst, tmp); | 322 __ mov(dst, tmp); |
323 } | 323 } |
324 } | 324 } |
325 | 325 |
326 } else if (source->IsDoubleRegister()) { | 326 } else if (source->IsDoubleRegister()) { |
327 if (CpuFeatures::IsSupported(SSE2)) { | 327 XMMRegister src = cgen_->ToDoubleRegister(source); |
328 CpuFeatures::Scope scope(SSE2); | 328 if (destination->IsDoubleRegister()) { |
329 XMMRegister src = cgen_->ToDoubleRegister(source); | 329 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
330 if (destination->IsDoubleRegister()) { | 330 __ movaps(dst, src); |
331 XMMRegister dst = cgen_->ToDoubleRegister(destination); | |
332 __ movaps(dst, src); | |
333 } else { | |
334 ASSERT(destination->IsDoubleStackSlot()); | |
335 Operand dst = cgen_->ToOperand(destination); | |
336 __ movdbl(dst, src); | |
337 } | |
338 } else { | 331 } else { |
339 UNREACHABLE(); | 332 ASSERT(destination->IsDoubleStackSlot()); |
| 333 Operand dst = cgen_->ToOperand(destination); |
| 334 __ movdbl(dst, src); |
340 } | 335 } |
341 } else if (source->IsDoubleStackSlot()) { | 336 } else if (source->IsDoubleStackSlot()) { |
342 if (CpuFeatures::IsSupported(SSE2)) { | 337 ASSERT(destination->IsDoubleRegister() || |
343 CpuFeatures::Scope scope(SSE2); | 338 destination->IsDoubleStackSlot()); |
344 ASSERT(destination->IsDoubleRegister() || | 339 Operand src = cgen_->ToOperand(source); |
345 destination->IsDoubleStackSlot()); | 340 if (destination->IsDoubleRegister()) { |
346 Operand src = cgen_->ToOperand(source); | 341 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
347 if (destination->IsDoubleRegister()) { | 342 __ movdbl(dst, src); |
348 XMMRegister dst = cgen_->ToDoubleRegister(destination); | |
349 __ movdbl(dst, src); | |
350 } else { | |
351 // We rely on having xmm0 available as a fixed scratch register. | |
352 Operand dst = cgen_->ToOperand(destination); | |
353 __ movdbl(xmm0, src); | |
354 __ movdbl(dst, xmm0); | |
355 } | |
356 } else { | 343 } else { |
357 UNREACHABLE(); | 344 // We rely on having xmm0 available as a fixed scratch register. |
| 345 Operand dst = cgen_->ToOperand(destination); |
| 346 __ movdbl(xmm0, src); |
| 347 __ movdbl(dst, xmm0); |
358 } | 348 } |
| 349 |
359 } else { | 350 } else { |
360 UNREACHABLE(); | 351 UNREACHABLE(); |
361 } | 352 } |
362 | 353 |
363 RemoveMove(index); | 354 RemoveMove(index); |
364 } | 355 } |
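One detail worth noting in EmitMove: ia32 has no memory-to-memory move instruction, so a double stack-slot-to-stack-slot move has to bounce through a register, and this code reserves xmm0 as a fixed scratch for exactly that (movdbl xmm0, src followed by movdbl dst, xmm0). A trivial standalone analogue of the round trip:

    // Standalone analogue of the slot-to-slot double move above: the
    // value takes a round trip through a scratch (playing the role of
    // xmm0) because x86 cannot copy memory to memory in one move.
    void MoveDoubleSlotToSlot(double* dst_slot, const double* src_slot) {
      double scratch = *src_slot;  // movdbl xmm0, [src]
      *dst_slot = scratch;         // movdbl [dst], xmm0
    }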
365 | 356 |
366 | 357 |
367 void LGapResolver::EmitSwap(int index) { | 358 void LGapResolver::EmitSwap(int index) { |
368 LOperand* source = moves_[index].source(); | 359 LOperand* source = moves_[index].source(); |
(...skipping 43 matching lines...)
412 __ xor_(src, tmp0); | 403 __ xor_(src, tmp0); |
413 __ xor_(tmp0, src); | 404 __ xor_(tmp0, src); |
414 __ mov(dst, tmp0); | 405 __ mov(dst, tmp0); |
415 } else { | 406 } else { |
416 __ mov(tmp0, dst); | 407 __ mov(tmp0, dst); |
417 __ mov(tmp1, src); | 408 __ mov(tmp1, src); |
418 __ mov(dst, tmp1); | 409 __ mov(dst, tmp1); |
419 __ mov(src, tmp0); | 410 __ mov(src, tmp0); |
420 } | 411 } |
421 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { | 412 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { |
422 CpuFeatures::Scope scope(SSE2); | |
423 // XMM register-register swap. We rely on having xmm0 | 413 // XMM register-register swap. We rely on having xmm0 |
424 // available as a fixed scratch register. | 414 // available as a fixed scratch register. |
425 XMMRegister src = cgen_->ToDoubleRegister(source); | 415 XMMRegister src = cgen_->ToDoubleRegister(source); |
426 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 416 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
427 __ movaps(xmm0, src); | 417 __ movaps(xmm0, src); |
428 __ movaps(src, dst); | 418 __ movaps(src, dst); |
429 __ movaps(dst, xmm0); | 419 __ movaps(dst, xmm0); |
430 | 420 |
431 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { | 421 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { |
432 // XMM register-memory swap. We rely on having xmm0 | 422 // XMM register-memory swap. We rely on having xmm0 |
(...skipping 58 matching lines...)
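Two swap idioms are visible in EmitSwap above. Doubles are swapped through the fixed xmm0 scratch (movaps xmm0, src; movaps src, dst; movaps dst, xmm0). For general-purpose operands, when only one temp register is free, the code finishes with the classic three-XOR exchange, which swaps two values without extra storage. A standalone sketch of the XOR identity (note it breaks if both operands alias the same location, since both would end up zero; that cannot happen here, where one operand is a register and the other a stack slot):

    #include <cassert>
    #include <cstdint>

    // Three-XOR exchange: swaps *a and *b with no extra storage.
    void XorSwap(uint32_t* a, uint32_t* b) {
      *a ^= *b;  // a' = a ^ b
      *b ^= *a;  // b' = b ^ (a ^ b) = a
      *a ^= *b;  // a'' = (a ^ b) ^ a = b
    }

    int main() {
      uint32_t x = 0xDEADBEEF, y = 0x12345678;
      XorSwap(&x, &y);
      assert(x == 0x12345678 && y == 0xDEADBEEF);
      return 0;
    }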
491 } else if (destination->IsRegister()) { | 481 } else if (destination->IsRegister()) { |
492 source_uses_[destination->index()] = CountSourceUses(destination); | 482 source_uses_[destination->index()] = CountSourceUses(destination); |
493 } | 483 } |
494 } | 484 } |
495 | 485 |
496 #undef __ | 486 #undef __ |
497 | 487 |
498 } } // namespace v8::internal | 488 } } // namespace v8::internal |
499 | 489 |
500 #endif // V8_TARGET_ARCH_IA32 | 490 #endif // V8_TARGET_ARCH_IA32 |