OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 173 matching lines...)
184 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { | 184 if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) { |
185 ++count; | 185 ++count; |
186 } | 186 } |
187 } | 187 } |
188 return count; | 188 return count; |
189 } | 189 } |
190 | 190 |
191 | 191 |
192 Register LGapResolver::GetFreeRegisterNot(Register reg) { | 192 Register LGapResolver::GetFreeRegisterNot(Register reg) { |
193 int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); | 193 int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); |
194 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { | 194 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { |
195 if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { | 195 if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { |
196 return Register::FromAllocationIndex(i); | 196 return Register::FromAllocationIndex(i); |
197 } | 197 } |
198 } | 198 } |
199 return no_reg; | 199 return no_reg; |
200 } | 200 } |
201 | 201 |
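[Note] The substantive change in this hunk (and again at new lines 207 and 259 below) is the switch from the compile-time constant Register::kNumAllocatableRegisters to the runtime query Register::NumAllocatableRegisters(). Presumably the allocatable-register count can now depend on detected CPU features instead of being fixed at build time. A minimal sketch of that pattern under this assumption (Reg and FeatureDetected are illustrative stand-ins, not V8's definitions):

    #include <cstdio>

    // Hypothetical stand-in for runtime CPU-feature detection.
    static bool FeatureDetected() { return true; }

    struct Reg {
      // Before: static const int kNumAllocatable = 8;  // fixed at build time
      // After: queried at runtime, so the count can vary per machine.
      static int NumAllocatable() { return FeatureDetected() ? 8 : 6; }
    };

    int main() {
      // Loops over allocation indices now ask the runtime, exactly as the
      // rewritten loops in GetFreeRegisterNot() and HasBeenReset() do.
      for (int i = 0; i < Reg::NumAllocatable(); ++i) printf("slot %d\n", i);
      return 0;
    }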
202 | 202 |
203 bool LGapResolver::HasBeenReset() { | 203 bool LGapResolver::HasBeenReset() { |
204 if (!moves_.is_empty()) return false; | 204 if (!moves_.is_empty()) return false; |
205 if (spilled_register_ >= 0) return false; | 205 if (spilled_register_ >= 0) return false; |
206 | 206 |
207 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { | 207 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { |
208 if (source_uses_[i] != 0) return false; | 208 if (source_uses_[i] != 0) return false; |
209 if (destination_uses_[i] != 0) return false; | 209 if (destination_uses_[i] != 0) return false; |
210 } | 210 } |
211 return true; | 211 return true; |
212 } | 212 } |
213 | 213 |
214 | 214 |
215 void LGapResolver::Verify() { | 215 void LGapResolver::Verify() { |
216 #ifdef ENABLE_SLOW_ASSERTS | 216 #ifdef ENABLE_SLOW_ASSERTS |
217 // No operand should be the destination for more than one move. | 217 // No operand should be the destination for more than one move. |
(...skipping 31 matching lines...)
249 if (spilled_register_ >= 0) { | 249 if (spilled_register_ >= 0) { |
250 return Register::FromAllocationIndex(spilled_register_); | 250 return Register::FromAllocationIndex(spilled_register_); |
251 } | 251 } |
252 | 252 |
253 // 2. We may have a free register that we can use without spilling. | 253 // 2. We may have a free register that we can use without spilling. |
254 Register free = GetFreeRegisterNot(no_reg); | 254 Register free = GetFreeRegisterNot(no_reg); |
255 if (!free.is(no_reg)) return free; | 255 if (!free.is(no_reg)) return free; |
256 | 256 |
257 // 3. Prefer to spill a register that is not used in any remaining move | 257 // 3. Prefer to spill a register that is not used in any remaining move |
258 // because it will not need to be restored until the end. | 258 // because it will not need to be restored until the end. |
259 for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { | 259 for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { |
260 if (source_uses_[i] == 0 && destination_uses_[i] == 0) { | 260 if (source_uses_[i] == 0 && destination_uses_[i] == 0) { |
261 Register scratch = Register::FromAllocationIndex(i); | 261 Register scratch = Register::FromAllocationIndex(i); |
262 __ push(scratch); | 262 __ push(scratch); |
263 spilled_register_ = i; | 263 spilled_register_ = i; |
264 return scratch; | 264 return scratch; |
265 } | 265 } |
266 } | 266 } |
267 | 267 |
268 // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. | 268 // 4. Use an arbitrary register. Register 0 is as arbitrary as any other. |
269 Register scratch = Register::FromAllocationIndex(0); | 269 Register scratch = Register::FromAllocationIndex(0); |
(...skipping 47 matching lines...)
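[Note] The EnsureTempRegister hunk above encodes a four-step cost ladder for obtaining a scratch register: (1) reuse a register that is already spilled, which costs nothing further; (2) take a register whose value is dead because it is never read by a remaining move and will be overwritten anyway; (3) spill a register untouched by any remaining move, so it only has to be restored once at the end; (4) as a last resort, spill allocation index 0. A simplified model of that priority order (SpillState and the fixed count of 8 are hypothetical; this is a sketch, not V8's implementation):

    // Hypothetical, simplified model of EnsureTempRegister's choice.
    struct SpillState {
      int spilled_index;        // -1 while no register is spilled
      int source_uses[8];       // pending reads per allocation index
      int destination_uses[8];  // pending writes per allocation index
    };

    int ChooseScratch(SpillState* s) {
      if (s->spilled_index >= 0) return s->spilled_index;          // step 1
      for (int i = 0; i < 8; ++i)                                  // step 2
        if (s->source_uses[i] == 0 && s->destination_uses[i] > 0) return i;
      for (int i = 0; i < 8; ++i)                                  // step 3
        if (s->source_uses[i] == 0 && s->destination_uses[i] == 0) {
          s->spilled_index = i;  // caller pushes it now, pops it at the end
          return i;
        }
      s->spilled_index = 0;      // step 4: spill an arbitrary register
      return 0;
    }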
317 if (cgen_->IsInteger32(constant_source)) { | 317 if (cgen_->IsInteger32(constant_source)) { |
318 __ Set(dst, cgen_->ToInteger32Immediate(constant_source)); | 318 __ Set(dst, cgen_->ToInteger32Immediate(constant_source)); |
319 } else { | 319 } else { |
320 Register tmp = EnsureTempRegister(); | 320 Register tmp = EnsureTempRegister(); |
321 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); | 321 __ LoadObject(tmp, cgen_->ToHandle(constant_source)); |
322 __ mov(dst, tmp); | 322 __ mov(dst, tmp); |
323 } | 323 } |
324 } | 324 } |
325 | 325 |
326 } else if (source->IsDoubleRegister()) { | 326 } else if (source->IsDoubleRegister()) { |
327 XMMRegister src = cgen_->ToDoubleRegister(source); | 327 if (CpuFeatures::IsSupported(SSE2)) { |
328 if (destination->IsDoubleRegister()) { | 328 CpuFeatures::Scope scope(SSE2); |
329 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 329 XMMRegister src = cgen_->ToDoubleRegister(source); |
330 __ movaps(dst, src); | 330 if (destination->IsDoubleRegister()) { |
| 331 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 332 __ movaps(dst, src); |
| 333 } else { |
| 334 ASSERT(destination->IsDoubleStackSlot()); |
| 335 Operand dst = cgen_->ToOperand(destination); |
| 336 __ movdbl(dst, src); |
| 337 } |
331 } else { | 338 } else { |
332 ASSERT(destination->IsDoubleStackSlot()); | 339 UNREACHABLE(); |
333 Operand dst = cgen_->ToOperand(destination); | |
334 __ movdbl(dst, src); | |
335 } | 340 } |
336 } else if (source->IsDoubleStackSlot()) { | 341 } else if (source->IsDoubleStackSlot()) { |
337 ASSERT(destination->IsDoubleRegister() || | 342 if (CpuFeatures::IsSupported(SSE2)) { |
338 destination->IsDoubleStackSlot()); | 343 CpuFeatures::Scope scope(SSE2); |
339 Operand src = cgen_->ToOperand(source); | 344 ASSERT(destination->IsDoubleRegister() || |
340 if (destination->IsDoubleRegister()) { | 345 destination->IsDoubleStackSlot()); |
341 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 346 Operand src = cgen_->ToOperand(source); |
342 __ movdbl(dst, src); | 347 if (destination->IsDoubleRegister()) { |
| 348 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
| 349 __ movdbl(dst, src); |
| 350 } else { |
| 351 // We rely on having xmm0 available as a fixed scratch register. |
| 352 Operand dst = cgen_->ToOperand(destination); |
| 353 __ movdbl(xmm0, src); |
| 354 __ movdbl(dst, xmm0); |
| 355 } |
343 } else { | 356 } else { |
344 // We rely on having xmm0 available as a fixed scratch register. | 357 UNREACHABLE(); |
345 Operand dst = cgen_->ToOperand(destination); | |
346 __ movdbl(xmm0, src); | |
347 __ movdbl(dst, xmm0); | |
348 } | 358 } |
349 | |
350 } else { | 359 } else { |
351 UNREACHABLE(); | 360 UNREACHABLE(); |
352 } | 361 } |
353 | 362 |
354 RemoveMove(index); | 363 RemoveMove(index); |
355 } | 364 } |
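[Note] The EmitMove changes wrap every XMM access in an explicit SSE2 guard: CpuFeatures::IsSupported(SSE2) selects the path, CpuFeatures::Scope documents (and in debug builds asserts) that SSE2 instructions may be emitted inside it, and the new UNREACHABLE() arms record the assumption that the register allocator never produces double moves on non-SSE2 hardware. A minimal sketch of the guard-plus-scope pattern under those assumptions (the classes below are simplified stand-ins, not the real V8 machinery):

    #include <cassert>
    #include <cstdio>

    // Simplified stand-ins for V8's CPU-feature machinery.
    struct CpuFeatures {
      static bool IsSupported(int f) { return true; }  // detection stub
      struct Scope {  // asserts the feature is actually available
        explicit Scope(int f) { assert(CpuFeatures::IsSupported(f)); }
      };
    };
    enum { SSE2 = 1 };

    void EmitDoubleMove() {
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope scope(SSE2);
        printf("movdbl ...\n");  // SSE2 instructions are legal here
      } else {
        assert(false && "no double moves expected without SSE2");
      }
    }

    int main() { EmitDoubleMove(); return 0; }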
356 | 365 |
357 | 366 |
358 void LGapResolver::EmitSwap(int index) { | 367 void LGapResolver::EmitSwap(int index) { |
359 LOperand* source = moves_[index].source(); | 368 LOperand* source = moves_[index].source(); |
(...skipping 43 matching lines...)
403 __ xor_(src, tmp0); | 412 __ xor_(src, tmp0); |
404 __ xor_(tmp0, src); | 413 __ xor_(tmp0, src); |
405 __ mov(dst, tmp0); | 414 __ mov(dst, tmp0); |
406 } else { | 415 } else { |
407 __ mov(tmp0, dst); | 416 __ mov(tmp0, dst); |
408 __ mov(tmp1, src); | 417 __ mov(tmp1, src); |
409 __ mov(dst, tmp1); | 418 __ mov(dst, tmp1); |
410 __ mov(src, tmp0); | 419 __ mov(src, tmp0); |
411 } | 420 } |
412 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { | 421 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { |
| 422 CpuFeatures::Scope scope(SSE2); |
413 // XMM register-register swap. We rely on having xmm0 | 423 // XMM register-register swap. We rely on having xmm0 |
414 // available as a fixed scratch register. | 424 // available as a fixed scratch register. |
415 XMMRegister src = cgen_->ToDoubleRegister(source); | 425 XMMRegister src = cgen_->ToDoubleRegister(source); |
416 XMMRegister dst = cgen_->ToDoubleRegister(destination); | 426 XMMRegister dst = cgen_->ToDoubleRegister(destination); |
417 __ movaps(xmm0, src); | 427 __ movaps(xmm0, src); |
418 __ movaps(src, dst); | 428 __ movaps(src, dst); |
419 __ movaps(dst, xmm0); | 429 __ movaps(dst, xmm0); |
420 | 430 |
421 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { | 431 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { |
422 // XMM register-memory swap. We rely on having xmm0 | 432 // XMM register-memory swap. We rely on having xmm0 |
(...skipping 58 matching lines...)
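[Note] Two details in the EmitSwap hunk above: the XMM register-register branch gains the same CpuFeatures::Scope(SSE2) annotation as EmitMove, and the general-purpose branch (old lines 403-405) uses the classic xor-swap identity when only one temp register (tmp0) is available, avoiding a second temporary (the sequence's opening mov and xor fall in the elided lines). The identity, as a self-contained demonstration:

    #include <cassert>

    // Swap two values with xor and no temporary. Requires distinct
    // objects: if a == b, the first xor would zero the value.
    void XorSwap(unsigned* a, unsigned* b) {
      assert(a != b);
      *a ^= *b;  // a = a0 ^ b0
      *b ^= *a;  // b = b0 ^ (a0 ^ b0) = a0
      *a ^= *b;  // a = (a0 ^ b0) ^ a0 = b0
    }

    int main() {
      unsigned x = 0x1234, y = 0xABCD;
      XorSwap(&x, &y);
      assert(x == 0xABCD && y == 0x1234);
      return 0;
    }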
481 } else if (destination->IsRegister()) { | 491 } else if (destination->IsRegister()) { |
482 source_uses_[destination->index()] = CountSourceUses(destination); | 492 source_uses_[destination->index()] = CountSourceUses(destination); |
483 } | 493 } |
484 } | 494 } |
485 | 495 |
486 #undef __ | 496 #undef __ |
487 | 497 |
488 } } // namespace v8::internal | 498 } } // namespace v8::internal |
489 | 499 |
490 #endif // V8_TARGET_ARCH_IA32 | 500 #endif // V8_TARGET_ARCH_IA32 |