| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/compiler/code-generator-impl.h" | 7 #include "src/compiler/code-generator-impl.h" |
| 8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/ia32/assembler-ia32.h" | 10 #include "src/ia32/assembler-ia32.h" |
| 11 #include "src/ia32/macro-assembler-ia32.h" | 11 #include "src/ia32/macro-assembler-ia32.h" |
| 12 #include "src/scopes.h" | 12 #include "src/scopes.h" |
| 13 | 13 |
| 14 namespace v8 { | 14 namespace v8 { |
| 15 namespace internal { | 15 namespace internal { |
| 16 namespace compiler { | 16 namespace compiler { |
| 17 | 17 |
| 18 #define __ masm()-> | 18 #define __ masm()-> |
| 19 | 19 |
| 20 | 20 |
| | 21 #define kScratchDoubleReg xmm0 |
| | 22 |
| | 23 |
| 21 // Adds IA-32 specific methods for decoding operands. | 24 // Adds IA-32 specific methods for decoding operands. |
| 22 class IA32OperandConverter : public InstructionOperandConverter { | 25 class IA32OperandConverter : public InstructionOperandConverter { |
| 23 public: | 26 public: |
| 24 IA32OperandConverter(CodeGenerator* gen, Instruction* instr) | 27 IA32OperandConverter(CodeGenerator* gen, Instruction* instr) |
| 25 : InstructionOperandConverter(gen, instr) {} | 28 : InstructionOperandConverter(gen, instr) {} |
| 26 | 29 |
| 27 Operand InputOperand(size_t index, int extra = 0) { | 30 Operand InputOperand(size_t index, int extra = 0) { |
| 28 return ToOperand(instr_->InputAt(index), extra); | 31 return ToOperand(instr_->InputAt(index), extra); |
| 29 } | 32 } |
| 30 | 33 |
| (...skipping 436 matching lines...) | |
| 467 break; | 470 break; |
| 468 case kSSEFloat32Max: | 471 case kSSEFloat32Max: |
| 469 __ maxss(i.InputDoubleRegister(0), i.InputOperand(1)); | 472 __ maxss(i.InputDoubleRegister(0), i.InputOperand(1)); |
| 470 break; | 473 break; |
| 471 case kSSEFloat32Min: | 474 case kSSEFloat32Min: |
| 472 __ minss(i.InputDoubleRegister(0), i.InputOperand(1)); | 475 __ minss(i.InputDoubleRegister(0), i.InputOperand(1)); |
| 473 break; | 476 break; |
| 474 case kSSEFloat32Sqrt: | 477 case kSSEFloat32Sqrt: |
| 475 __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0)); | 478 __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 476 break; | 479 break; |
| | 480 case kSSEFloat32Neg: { |
| | 481 // TODO(bmeurer): Use 128-bit constants. |
| | 482 // TODO(turbofan): Add AVX version with relaxed register constraints. |
| | 483 __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); |
| | 484 __ psllq(kScratchDoubleReg, 31); |
| | 485 __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg); |
| | 486 break; |
| | 487 } |
| 477 case kSSEFloat64Cmp: | 488 case kSSEFloat64Cmp: |
| 478 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); | 489 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); |
| 479 break; | 490 break; |
| 480 case kSSEFloat64Add: | 491 case kSSEFloat64Add: |
| 481 __ addsd(i.InputDoubleRegister(0), i.InputOperand(1)); | 492 __ addsd(i.InputDoubleRegister(0), i.InputOperand(1)); |
| 482 break; | 493 break; |
| 483 case kSSEFloat64Sub: | 494 case kSSEFloat64Sub: |
| 484 __ subsd(i.InputDoubleRegister(0), i.InputOperand(1)); | 495 __ subsd(i.InputDoubleRegister(0), i.InputOperand(1)); |
| 485 break; | 496 break; |
| 486 case kSSEFloat64Mul: | 497 case kSSEFloat64Mul: |
| (...skipping 26 matching lines...) | |
| 513 __ fnstsw_ax(); | 524 __ fnstsw_ax(); |
| 514 __ sahf(); | 525 __ sahf(); |
| 515 __ j(parity_even, &mod_loop); | 526 __ j(parity_even, &mod_loop); |
| 516 // Move output to stack and clean up. | 527 // Move output to stack and clean up. |
| 517 __ fstp(1); | 528 __ fstp(1); |
| 518 __ fstp_d(Operand(esp, 0)); | 529 __ fstp_d(Operand(esp, 0)); |
| 519 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); | 530 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); |
| 520 __ add(esp, Immediate(kDoubleSize)); | 531 __ add(esp, Immediate(kDoubleSize)); |
| 521 break; | 532 break; |
| 522 } | 533 } |
| | 534 case kSSEFloat64Neg: { |
| | 535 // TODO(bmeurer): Use 128-bit constants. |
| | 536 // TODO(turbofan): Add AVX version with relaxed register constraints. |
| | 537 __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); |
| | 538 __ psllq(kScratchDoubleReg, 63); |
| | 539 __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); |
| | 540 break; |
| | 541 } |
| 523 case kSSEFloat64Sqrt: | 542 case kSSEFloat64Sqrt: |
| 524 __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0)); | 543 __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 525 break; | 544 break; |
| 526 case kSSEFloat64Round: { | 545 case kSSEFloat64Round: { |
| 527 CpuFeatureScope sse_scope(masm(), SSE4_1); | 546 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| 528 RoundingMode const mode = | 547 RoundingMode const mode = |
| 529 static_cast<RoundingMode>(MiscField::decode(instr->opcode())); | 548 static_cast<RoundingMode>(MiscField::decode(instr->opcode())); |
| 530 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); | 549 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); |
| 531 break; | 550 break; |
| 532 } | 551 } |
| 533 case kSSEFloat32ToFloat64: | 552 case kSSEFloat32ToFloat64: |
| 534 __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0)); | 553 __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 535 break; | 554 break; |
| 536 case kSSEFloat64ToFloat32: | 555 case kSSEFloat64ToFloat32: |
| 537 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0)); | 556 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 538 break; | 557 break; |
| 539 case kSSEFloat64ToInt32: | 558 case kSSEFloat64ToInt32: |
| 540 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); | 559 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); |
| 541 break; | 560 break; |
| 542 case kSSEFloat64ToUint32: { | 561 case kSSEFloat64ToUint32: { |
| 543 XMMRegister scratch = xmm0; | 562 __ Move(kScratchDoubleReg, -2147483648.0); |
| 544 __ Move(scratch, -2147483648.0); | 563 __ addsd(kScratchDoubleReg, i.InputOperand(0)); |
| 545 __ addsd(scratch, i.InputOperand(0)); | 564 __ cvttsd2si(i.OutputRegister(), kScratchDoubleReg); |
| 546 __ cvttsd2si(i.OutputRegister(), scratch); | |
| 547 __ add(i.OutputRegister(), Immediate(0x80000000)); | 565 __ add(i.OutputRegister(), Immediate(0x80000000)); |
| 548 break; | 566 break; |
| 549 } | 567 } |
| 550 case kSSEInt32ToFloat64: | 568 case kSSEInt32ToFloat64: |
| 551 __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); | 569 __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 552 break; | 570 break; |
| 553 case kSSEUint32ToFloat64: | 571 case kSSEUint32ToFloat64: |
| 554 __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0)); | 572 __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 555 break; | 573 break; |
| 556 case kSSEFloat64ExtractLowWord32: | 574 case kSSEFloat64ExtractLowWord32: |
| (...skipping 739 matching lines...) | |
| 1296 Operand dst = g.ToOperand(destination); | 1314 Operand dst = g.ToOperand(destination); |
| 1297 __ movsd(dst, src); | 1315 __ movsd(dst, src); |
| 1298 } | 1316 } |
| 1299 } else if (source->IsDoubleStackSlot()) { | 1317 } else if (source->IsDoubleStackSlot()) { |
| 1300 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); | 1318 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); |
| 1301 Operand src = g.ToOperand(source); | 1319 Operand src = g.ToOperand(source); |
| 1302 if (destination->IsDoubleRegister()) { | 1320 if (destination->IsDoubleRegister()) { |
| 1303 XMMRegister dst = g.ToDoubleRegister(destination); | 1321 XMMRegister dst = g.ToDoubleRegister(destination); |
| 1304 __ movsd(dst, src); | 1322 __ movsd(dst, src); |
| 1305 } else { | 1323 } else { |
| 1306 // We rely on having xmm0 available as a fixed scratch register. | |
| 1307 Operand dst = g.ToOperand(destination); | 1324 Operand dst = g.ToOperand(destination); |
| 1308 __ movsd(xmm0, src); | 1325 __ movsd(kScratchDoubleReg, src); |
| 1309 __ movsd(dst, xmm0); | 1326 __ movsd(dst, kScratchDoubleReg); |
| 1310 } | 1327 } |
| 1311 } else { | 1328 } else { |
| 1312 UNREACHABLE(); | 1329 UNREACHABLE(); |
| 1313 } | 1330 } |
| 1314 } | 1331 } |
| 1315 | 1332 |
| 1316 | 1333 |
| 1317 void CodeGenerator::AssembleSwap(InstructionOperand* source, | 1334 void CodeGenerator::AssembleSwap(InstructionOperand* source, |
| 1318 InstructionOperand* destination) { | 1335 InstructionOperand* destination) { |
| 1319 IA32OperandConverter g(this, NULL); | 1336 IA32OperandConverter g(this, NULL); |
| 1320 // Dispatch on the source and destination operand kinds. Not all | 1337 // Dispatch on the source and destination operand kinds. Not all |
| 1321 // combinations are possible. | 1338 // combinations are possible. |
| 1322 if (source->IsRegister() && destination->IsRegister()) { | 1339 if (source->IsRegister() && destination->IsRegister()) { |
| 1323 // Register-register. | 1340 // Register-register. |
| 1324 Register src = g.ToRegister(source); | 1341 Register src = g.ToRegister(source); |
| 1325 Register dst = g.ToRegister(destination); | 1342 Register dst = g.ToRegister(destination); |
| 1326 __ xchg(dst, src); | 1343 __ xchg(dst, src); |
| 1327 } else if (source->IsRegister() && destination->IsStackSlot()) { | 1344 } else if (source->IsRegister() && destination->IsStackSlot()) { |
| 1328 // Register-memory. | 1345 // Register-memory. |
| 1329 __ xchg(g.ToRegister(source), g.ToOperand(destination)); | 1346 __ xchg(g.ToRegister(source), g.ToOperand(destination)); |
| 1330 } else if (source->IsStackSlot() && destination->IsStackSlot()) { | 1347 } else if (source->IsStackSlot() && destination->IsStackSlot()) { |
| 1331 // Memory-memory. | 1348 // Memory-memory. |
| 1332 Operand src = g.ToOperand(source); | 1349 Operand src = g.ToOperand(source); |
| 1333 Operand dst = g.ToOperand(destination); | 1350 Operand dst = g.ToOperand(destination); |
| 1334 __ push(dst); | 1351 __ push(dst); |
| 1335 __ push(src); | 1352 __ push(src); |
| 1336 __ pop(dst); | 1353 __ pop(dst); |
| 1337 __ pop(src); | 1354 __ pop(src); |
| 1338 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { | 1355 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { |
| 1339 // XMM register-register swap. We rely on having xmm0 | 1356 // XMM register-register swap. |
| 1340 // available as a fixed scratch register. | |
| 1341 XMMRegister src = g.ToDoubleRegister(source); | 1357 XMMRegister src = g.ToDoubleRegister(source); |
| 1342 XMMRegister dst = g.ToDoubleRegister(destination); | 1358 XMMRegister dst = g.ToDoubleRegister(destination); |
| 1343 __ movaps(xmm0, src); | 1359 __ movaps(kScratchDoubleReg, src); |
| 1344 __ movaps(src, dst); | 1360 __ movaps(src, dst); |
| 1345 __ movaps(dst, xmm0); | 1361 __ movaps(dst, kScratchDoubleReg); |
| 1346 } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) { | 1362 } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) { |
| 1347 // XMM register-memory swap. We rely on having xmm0 | 1363 // XMM register-memory swap. |
| 1348 // available as a fixed scratch register. | |
| 1349 XMMRegister reg = g.ToDoubleRegister(source); | 1364 XMMRegister reg = g.ToDoubleRegister(source); |
| 1350 Operand other = g.ToOperand(destination); | 1365 Operand other = g.ToOperand(destination); |
| 1351 __ movsd(xmm0, other); | 1366 __ movsd(kScratchDoubleReg, other); |
| 1352 __ movsd(other, reg); | 1367 __ movsd(other, reg); |
| 1353 __ movaps(reg, xmm0); | 1368 __ movaps(reg, kScratchDoubleReg); |
| 1354 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { | 1369 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { |
| 1355 // Double-width memory-to-memory. | 1370 // Double-width memory-to-memory. |
| 1356 Operand src0 = g.ToOperand(source); | 1371 Operand src0 = g.ToOperand(source); |
| 1357 Operand src1 = g.HighOperand(source); | 1372 Operand src1 = g.HighOperand(source); |
| 1358 Operand dst0 = g.ToOperand(destination); | 1373 Operand dst0 = g.ToOperand(destination); |
| 1359 Operand dst1 = g.HighOperand(destination); | 1374 Operand dst1 = g.HighOperand(destination); |
| 1360 __ movsd(xmm0, dst0); // Save destination in xmm0. | 1375 __ movsd(kScratchDoubleReg, dst0); // Save destination in scratch register. |
| 1361 __ push(src0); // Then use stack to copy source to destination. | 1376 __ push(src0); // Then use stack to copy source to destination. |
| 1362 __ pop(dst0); | 1377 __ pop(dst0); |
| 1363 __ push(src1); | 1378 __ push(src1); |
| 1364 __ pop(dst1); | 1379 __ pop(dst1); |
| 1365 __ movsd(src0, xmm0); | 1380 __ movsd(src0, kScratchDoubleReg); |
| 1366 } else { | 1381 } else { |
| 1367 // No other combinations are possible. | 1382 // No other combinations are possible. |
| 1368 UNREACHABLE(); | 1383 UNREACHABLE(); |
| 1369 } | 1384 } |
| 1370 } | 1385 } |
| 1371 | 1386 |
| 1372 | 1387 |
| 1373 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { | 1388 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { |
| 1374 for (size_t index = 0; index < target_count; ++index) { | 1389 for (size_t index = 0; index < target_count; ++index) { |
| 1375 __ dd(targets[index]); | 1390 __ dd(targets[index]); |
| (...skipping 16 matching lines...) | |
| 1392 } | 1407 } |
| 1393 } | 1408 } |
| 1394 MarkLazyDeoptSite(); | 1409 MarkLazyDeoptSite(); |
| 1395 } | 1410 } |
| 1396 | 1411 |
| 1397 #undef __ | 1412 #undef __ |
| 1398 | 1413 |
| 1399 } // namespace compiler | 1414 } // namespace compiler |
| 1400 } // namespace internal | 1415 } // namespace internal |
| 1401 } // namespace v8 | 1416 } // namespace v8 |
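Side note on the new kSSEFloat32Neg/kSSEFloat64Neg sequences in this diff: pcmpeqd fills the scratch register with all ones, psllq clears everything below the sign-bit position, and the xor flips that bit in the operand. A minimal host-side C++ sketch of the same bit trick (NegateViaSignBit is a hypothetical helper for illustration, not V8 code):

```cpp
#include <cstdint>
#include <cstring>

// Flip only the sign bit of a double, mirroring the mask-and-xor
// sequence (pcmpeqd + psllq 63 + xorpd) used by kSSEFloat64Neg.
double NegateViaSignBit(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= uint64_t{1} << 63;  // sign bit of an IEEE-754 double
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}
```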
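The rewritten kSSEFloat64ToUint32 path keeps the existing bias trick and only swaps the hard-coded xmm0 for kScratchDoubleReg: add -2^31 to the input so a signed truncating conversion (cvttsd2si) can represent it, then add 2^31 back with unsigned wrap-around. A host-side sketch, assuming the input lies in [0, 2^32) (DoubleToUint32 is a hypothetical name):

```cpp
#include <cstdint>

// Bias-by-2^31 conversion matching the kSSEFloat64ToUint32 sequence:
// Move(-2^31) + addsd + cvttsd2si + add 0x80000000.
uint32_t DoubleToUint32(double x) {
  // x - 2^31 always fits in int32 when x is in [0, 2^32).
  int32_t biased = static_cast<int32_t>(x + -2147483648.0);
  return static_cast<uint32_t>(biased) + 0x80000000u;  // undo the bias, unsigned
}
```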