| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/ast/scopes.h" | 7 #include "src/ast/scopes.h" |
| 8 #include "src/compiler/code-generator-impl.h" | 8 #include "src/compiler/code-generator-impl.h" |
| 9 #include "src/compiler/gap-resolver.h" | 9 #include "src/compiler/gap-resolver.h" |
| 10 #include "src/compiler/node-matchers.h" | 10 #include "src/compiler/node-matchers.h" |
| (...skipping 241 matching lines...) |
| 252 return le; | 252 return le; |
| 253 case kSignedGreaterThan: | 253 case kSignedGreaterThan: |
| 254 case kUnsignedGreaterThan: | 254 case kUnsignedGreaterThan: |
| 255 return gt; | 255 return gt; |
| 256 case kOverflow: | 256 case kOverflow: |
| 257 // Overflow checked for add/sub only. | 257 // Overflow checked for add/sub only. |
| 258 switch (op) { | 258 switch (op) { |
| 259 #if V8_TARGET_ARCH_PPC64 | 259 #if V8_TARGET_ARCH_PPC64 |
| 260 case kPPC_Add: | 260 case kPPC_Add: |
| 261 case kPPC_Sub: | 261 case kPPC_Sub: |
| 262 return lt; | |
| 263 #endif | 262 #endif |
| 264 case kPPC_AddWithOverflow32: | 263 case kPPC_AddWithOverflow32: |
| 265 case kPPC_SubWithOverflow32: | 264 case kPPC_SubWithOverflow32: |
| 266 #if V8_TARGET_ARCH_PPC64 | |
| 267 return ne; | |
| 268 #else | |
| 269 return lt; | 265 return lt; |
| 270 #endif | |
| 271 default: | 266 default: |
| 272 break; | 267 break; |
| 273 } | 268 } |
| 274 break; | 269 break; |
| 275 case kNotOverflow: | 270 case kNotOverflow: |
| 276 switch (op) { | 271 switch (op) { |
| 277 #if V8_TARGET_ARCH_PPC64 | 272 #if V8_TARGET_ARCH_PPC64 |
| 278 case kPPC_Add: | 273 case kPPC_Add: |
| 279 case kPPC_Sub: | 274 case kPPC_Sub: |
| 280 return ge; | |
| 281 #endif | 275 #endif |
| 282 case kPPC_AddWithOverflow32: | 276 case kPPC_AddWithOverflow32: |
| 283 case kPPC_SubWithOverflow32: | 277 case kPPC_SubWithOverflow32: |
| 284 #if V8_TARGET_ARCH_PPC64 | |
| 285 return eq; | |
| 286 #else | |
| 287 return ge; | 278 return ge; |
| 288 #endif | |
| 289 default: | 279 default: |
| 290 break; | 280 break; |
| 291 } | 281 } |
| 292 break; | 282 break; |
| 293 default: | 283 default: |
| 294 break; | 284 break; |
| 295 } | 285 } |
| 296 UNREACHABLE(); | 286 UNREACHABLE(); |
| 297 return kNoCondition; | 287 return kNoCondition; |
| 298 } | 288 } |
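Note on this hunk: the old mapping special-cased the 32-bit overflow ops on PPC64 (ne/eq, from a TestIfInt32 result), while the new macros below leave the same sign-bit predicate in kScratchReg for both widths, so a single lt/ge mapping now covers every add/sub overflow variant. A minimal sketch of the sign-bit predicate that AddAndCheckForOverflow-style helpers compute (illustrative C++, not the V8 code; the function name is hypothetical):

    #include <cstdint>

    // Signed add overflows iff the operands agree in sign but the sum
    // does not; the predicate below is negative exactly in that case,
    // which is what the "lt" condition ends up testing.
    bool AddOverflows32(int32_t a, int32_t b, int32_t* sum_out) {
      int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                         static_cast<uint32_t>(b));
      *sum_out = sum;
      return (((sum ^ a) & ~(a ^ b)) < 0);  // sign bit set => overflow
    }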
| (...skipping 72 matching lines...) |
| 371 __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \ | 361 __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \ |
| 372 i.InputRegister(1), kScratchReg, r0); \ | 362 i.InputRegister(1), kScratchReg, r0); \ |
| 373 } else { \ | 363 } else { \ |
| 374 __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \ | 364 __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \ |
| 375 -i.InputInt32(1), kScratchReg, r0); \ | 365 -i.InputInt32(1), kScratchReg, r0); \ |
| 376 } \ | 366 } \ |
| 377 } while (0) | 367 } while (0) |
| 378 | 368 |
| 379 | 369 |
| 380 #if V8_TARGET_ARCH_PPC64 | 370 #if V8_TARGET_ARCH_PPC64 |
| 381 #define ASSEMBLE_ADD_WITH_OVERFLOW32() \ | 371 #define ASSEMBLE_ADD_WITH_OVERFLOW32() \ |
| 382 do { \ | 372 do { \ |
| 383 ASSEMBLE_BINOP(add, addi); \ | 373 ASSEMBLE_ADD_WITH_OVERFLOW(); \ |
| 384 __ TestIfInt32(i.OutputRegister(), r0, cr0); \ | 374 __ extsw(kScratchReg, kScratchReg, SetRC); \ |
| 385 } while (0) | 375 } while (0) |
| 386 | 376 |
| 387 | 377 #define ASSEMBLE_SUB_WITH_OVERFLOW32() \ |
| 388 #define ASSEMBLE_SUB_WITH_OVERFLOW32() \ | 378 do { \ |
| 389 do { \ | 379 ASSEMBLE_SUB_WITH_OVERFLOW(); \ |
| 390 ASSEMBLE_BINOP(sub, subi); \ | 380 __ extsw(kScratchReg, kScratchReg, SetRC); \ |
| 391 __ TestIfInt32(i.OutputRegister(), r0, cr0); \ | |
| 392 } while (0) | 381 } while (0) |
| 393 #else | 382 #else |
| 394 #define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW | 383 #define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW |
| 395 #define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW | 384 #define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW |
| 396 #endif | 385 #endif |
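Note: on PPC64 the 32-bit variants now reuse the 64-bit overflow macro and then sign-extend the scratch predicate with the record form of extsw (SetRC updates CR0), instead of re-running the binop and calling TestIfInt32 on the result. That keeps bit 31 of the predicate acting as the sign bit the lt/ge branch tests. A rough model of the narrowing step (hedged C++ stand-in, not the emitted code):

    #include <cstdint>

    // Models extsw: take the low 32 bits of the 64-bit scratch value
    // and sign-extend, so a set bit 31 reads as "negative" (lt) in the
    // subsequent condition check.
    int64_t SignExtendLow32(int64_t scratch) {
      return static_cast<int32_t>(static_cast<uint32_t>(scratch));
    }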
| 397 | 386 |
| 398 | 387 |
| 399 #define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \ | 388 #define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \ |
| 400 do { \ | 389 do { \ |
| 401 const CRegister cr = cr0; \ | 390 const CRegister cr = cr0; \ |
| (...skipping 128 matching lines...) |
| 530 Register value = i.InputRegister(index); \ | 519 Register value = i.InputRegister(index); \ |
| 531 if (mode == kMode_MRI) { \ | 520 if (mode == kMode_MRI) { \ |
| 532 __ asm_instr(value, operand); \ | 521 __ asm_instr(value, operand); \ |
| 533 } else { \ | 522 } else { \ |
| 534 __ asm_instrx(value, operand); \ | 523 __ asm_instrx(value, operand); \ |
| 535 } \ | 524 } \ |
| 536 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ | 525 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ |
| 537 } while (0) | 526 } while (0) |
| 538 | 527 |
| 539 | 528 |
| 540 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits. | |
| 541 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \ | 529 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \ |
| 542 do { \ | 530 do { \ |
| 543 DoubleRegister result = i.OutputDoubleRegister(); \ | 531 DoubleRegister result = i.OutputDoubleRegister(); \ |
| 544 size_t index = 0; \ | 532 size_t index = 0; \ |
| 545 AddressingMode mode = kMode_None; \ | 533 AddressingMode mode = kMode_None; \ |
| 546 MemOperand operand = i.MemoryOperand(&mode, index); \ | 534 MemOperand operand = i.MemoryOperand(&mode, index); \ |
| 547 DCHECK_EQ(kMode_MRR, mode); \ | 535 DCHECK_EQ(kMode_MRR, mode); \ |
| 548 Register offset = operand.rb(); \ | 536 Register offset = operand.rb(); \ |
| 549 __ extsw(offset, offset); \ | |
| 550 if (HasRegisterInput(instr, 2)) { \ | 537 if (HasRegisterInput(instr, 2)) { \ |
| 551 __ cmplw(offset, i.InputRegister(2)); \ | 538 __ cmplw(offset, i.InputRegister(2)); \ |
| 552 } else { \ | 539 } else { \ |
| 553 __ cmplwi(offset, i.InputImmediate(2)); \ | 540 __ cmplwi(offset, i.InputImmediate(2)); \ |
| 554 } \ | 541 } \ |
| 555 auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \ | 542 auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \ |
| 556 __ bge(ool->entry()); \ | 543 __ bge(ool->entry()); \ |
| 557 if (mode == kMode_MRI) { \ | 544 if (mode == kMode_MRI) { \ |
| 558 __ asm_instr(result, operand); \ | 545 __ asm_instr(result, operand); \ |
| 559 } else { \ | 546 } else { \ |
| 560 __ asm_instrx(result, operand); \ | 547 __ asm_instrx(result, operand); \ |
| 561 } \ | 548 } \ |
| 562 __ bind(ool->exit()); \ | 549 __ bind(ool->exit()); \ |
| 563 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ | 550 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ |
| 564 } while (0) | 551 } while (0) |
| 565 | 552 |
| 566 | 553 |
| 567 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits. | |
| 568 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \ | 554 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \ |
| 569 do { \ | 555 do { \ |
| 570 Register result = i.OutputRegister(); \ | 556 Register result = i.OutputRegister(); \ |
| 571 size_t index = 0; \ | 557 size_t index = 0; \ |
| 572 AddressingMode mode = kMode_None; \ | 558 AddressingMode mode = kMode_None; \ |
| 573 MemOperand operand = i.MemoryOperand(&mode, index); \ | 559 MemOperand operand = i.MemoryOperand(&mode, index); \ |
| 574 DCHECK_EQ(kMode_MRR, mode); \ | 560 DCHECK_EQ(kMode_MRR, mode); \ |
| 575 Register offset = operand.rb(); \ | 561 Register offset = operand.rb(); \ |
| 576 __ extsw(offset, offset); \ | |
| 577 if (HasRegisterInput(instr, 2)) { \ | 562 if (HasRegisterInput(instr, 2)) { \ |
| 578 __ cmplw(offset, i.InputRegister(2)); \ | 563 __ cmplw(offset, i.InputRegister(2)); \ |
| 579 } else { \ | 564 } else { \ |
| 580 __ cmplwi(offset, i.InputImmediate(2)); \ | 565 __ cmplwi(offset, i.InputImmediate(2)); \ |
| 581 } \ | 566 } \ |
| 582 auto ool = new (zone()) OutOfLineLoadZero(this, result); \ | 567 auto ool = new (zone()) OutOfLineLoadZero(this, result); \ |
| 583 __ bge(ool->entry()); \ | 568 __ bge(ool->entry()); \ |
| 584 if (mode == kMode_MRI) { \ | 569 if (mode == kMode_MRI) { \ |
| 585 __ asm_instr(result, operand); \ | 570 __ asm_instr(result, operand); \ |
| 586 } else { \ | 571 } else { \ |
| 587 __ asm_instrx(result, operand); \ | 572 __ asm_instrx(result, operand); \ |
| 588 } \ | 573 } \ |
| 589 __ bind(ool->exit()); \ | 574 __ bind(ool->exit()); \ |
| 590 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ | 575 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ |
| 591 } while (0) | 576 } while (0) |
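Note: the explicit extsw on the offset register, and the matching TODO about garbage in its upper 32 bits, is dropped from every checked load/store macro in this hunk (the store variants below follow the same pattern). cmplw/cmplwi already compare only the low 32 bits unsigned, so the sign extension added nothing to the bounds check itself; presumably the offset is now guaranteed well-formed by the time the load or store executes. The bounds-check semantics, as a sketch (illustrative C++; names are hypothetical):

    #include <cstdint>

    // What the cmplw/bge pair implements: an unsigned 32-bit compare
    // of the offset against the length, taking the out-of-line path
    // (NaN/zero result) when offset >= length.
    bool InBounds(uint64_t offset_reg, uint32_t length) {
      return static_cast<uint32_t>(offset_reg) < length;
    }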
| 592 | 577 |
| 593 | 578 |
| 594 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits. | |
| 595 #define ASSEMBLE_CHECKED_STORE_FLOAT32() \ | 579 #define ASSEMBLE_CHECKED_STORE_FLOAT32() \ |
| 596 do { \ | 580 do { \ |
| 597 Label done; \ | 581 Label done; \ |
| 598 size_t index = 0; \ | 582 size_t index = 0; \ |
| 599 AddressingMode mode = kMode_None; \ | 583 AddressingMode mode = kMode_None; \ |
| 600 MemOperand operand = i.MemoryOperand(&mode, index); \ | 584 MemOperand operand = i.MemoryOperand(&mode, index); \ |
| 601 DCHECK_EQ(kMode_MRR, mode); \ | 585 DCHECK_EQ(kMode_MRR, mode); \ |
| 602 Register offset = operand.rb(); \ | 586 Register offset = operand.rb(); \ |
| 603 __ extsw(offset, offset); \ | |
| 604 if (HasRegisterInput(instr, 2)) { \ | 587 if (HasRegisterInput(instr, 2)) { \ |
| 605 __ cmplw(offset, i.InputRegister(2)); \ | 588 __ cmplw(offset, i.InputRegister(2)); \ |
| 606 } else { \ | 589 } else { \ |
| 607 __ cmplwi(offset, i.InputImmediate(2)); \ | 590 __ cmplwi(offset, i.InputImmediate(2)); \ |
| 608 } \ | 591 } \ |
| 609 __ bge(&done); \ | 592 __ bge(&done); \ |
| 610 DoubleRegister value = i.InputDoubleRegister(3); \ | 593 DoubleRegister value = i.InputDoubleRegister(3); \ |
| 611 __ frsp(kScratchDoubleReg, value); \ | 594 __ frsp(kScratchDoubleReg, value); \ |
| 612 if (mode == kMode_MRI) { \ | 595 if (mode == kMode_MRI) { \ |
| 613 __ stfs(kScratchDoubleReg, operand); \ | 596 __ stfs(kScratchDoubleReg, operand); \ |
| 614 } else { \ | 597 } else { \ |
| 615 __ stfsx(kScratchDoubleReg, operand); \ | 598 __ stfsx(kScratchDoubleReg, operand); \ |
| 616 } \ | 599 } \ |
| 617 __ bind(&done); \ | 600 __ bind(&done); \ |
| 618 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ | 601 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ |
| 619 } while (0) | 602 } while (0) |
| 620 | 603 |
| 621 | 604 |
| 622 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits. | |
| 623 #define ASSEMBLE_CHECKED_STORE_DOUBLE() \ | 605 #define ASSEMBLE_CHECKED_STORE_DOUBLE() \ |
| 624 do { \ | 606 do { \ |
| 625 Label done; \ | 607 Label done; \ |
| 626 size_t index = 0; \ | 608 size_t index = 0; \ |
| 627 AddressingMode mode = kMode_None; \ | 609 AddressingMode mode = kMode_None; \ |
| 628 MemOperand operand = i.MemoryOperand(&mode, index); \ | 610 MemOperand operand = i.MemoryOperand(&mode, index); \ |
| 629 DCHECK_EQ(kMode_MRR, mode); \ | 611 DCHECK_EQ(kMode_MRR, mode); \ |
| 630 Register offset = operand.rb(); \ | 612 Register offset = operand.rb(); \ |
| 631 __ extsw(offset, offset); \ | |
| 632 if (HasRegisterInput(instr, 2)) { \ | 613 if (HasRegisterInput(instr, 2)) { \ |
| 633 __ cmplw(offset, i.InputRegister(2)); \ | 614 __ cmplw(offset, i.InputRegister(2)); \ |
| 634 } else { \ | 615 } else { \ |
| 635 __ cmplwi(offset, i.InputImmediate(2)); \ | 616 __ cmplwi(offset, i.InputImmediate(2)); \ |
| 636 } \ | 617 } \ |
| 637 __ bge(&done); \ | 618 __ bge(&done); \ |
| 638 DoubleRegister value = i.InputDoubleRegister(3); \ | 619 DoubleRegister value = i.InputDoubleRegister(3); \ |
| 639 if (mode == kMode_MRI) { \ | 620 if (mode == kMode_MRI) { \ |
| 640 __ stfd(value, operand); \ | 621 __ stfd(value, operand); \ |
| 641 } else { \ | 622 } else { \ |
| 642 __ stfdx(value, operand); \ | 623 __ stfdx(value, operand); \ |
| 643 } \ | 624 } \ |
| 644 __ bind(&done); \ | 625 __ bind(&done); \ |
| 645 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ | 626 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \ |
| 646 } while (0) | 627 } while (0) |
| 647 | 628 |
| 648 | 629 |
| 649 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits. | |
| 650 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \ | 630 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \ |
| 651 do { \ | 631 do { \ |
| 652 Label done; \ | 632 Label done; \ |
| 653 size_t index = 0; \ | 633 size_t index = 0; \ |
| 654 AddressingMode mode = kMode_None; \ | 634 AddressingMode mode = kMode_None; \ |
| 655 MemOperand operand = i.MemoryOperand(&mode, index); \ | 635 MemOperand operand = i.MemoryOperand(&mode, index); \ |
| 656 DCHECK_EQ(kMode_MRR, mode); \ | 636 DCHECK_EQ(kMode_MRR, mode); \ |
| 657 Register offset = operand.rb(); \ | 637 Register offset = operand.rb(); \ |
| 658 __ extsw(offset, offset); \ | |
| 659 if (HasRegisterInput(instr, 2)) { \ | 638 if (HasRegisterInput(instr, 2)) { \ |
| 660 __ cmplw(offset, i.InputRegister(2)); \ | 639 __ cmplw(offset, i.InputRegister(2)); \ |
| 661 } else { \ | 640 } else { \ |
| 662 __ cmplwi(offset, i.InputImmediate(2)); \ | 641 __ cmplwi(offset, i.InputImmediate(2)); \ |
| 663 } \ | 642 } \ |
| 664 __ bge(&done); \ | 643 __ bge(&done); \ |
| 665 Register value = i.InputRegister(3); \ | 644 Register value = i.InputRegister(3); \ |
| 666 if (mode == kMode_MRI) { \ | 645 if (mode == kMode_MRI) { \ |
| 667 __ asm_instr(value, operand); \ | 646 __ asm_instr(value, operand); \ |
| 668 } else { \ | 647 } else { \ |
| (...skipping 840 matching lines...) |
| 1509 case kPPC_LoadWordS8: | 1488 case kPPC_LoadWordS8: |
| 1510 ASSEMBLE_LOAD_INTEGER(lbz, lbzx); | 1489 ASSEMBLE_LOAD_INTEGER(lbz, lbzx); |
| 1511 __ extsb(i.OutputRegister(), i.OutputRegister()); | 1490 __ extsb(i.OutputRegister(), i.OutputRegister()); |
| 1512 break; | 1491 break; |
| 1513 case kPPC_LoadWordU16: | 1492 case kPPC_LoadWordU16: |
| 1514 ASSEMBLE_LOAD_INTEGER(lhz, lhzx); | 1493 ASSEMBLE_LOAD_INTEGER(lhz, lhzx); |
| 1515 break; | 1494 break; |
| 1516 case kPPC_LoadWordS16: | 1495 case kPPC_LoadWordS16: |
| 1517 ASSEMBLE_LOAD_INTEGER(lha, lhax); | 1496 ASSEMBLE_LOAD_INTEGER(lha, lhax); |
| 1518 break; | 1497 break; |
| | 1498 case kPPC_LoadWordU32: |
| | 1499 ASSEMBLE_LOAD_INTEGER(lwz, lwzx); |
| | 1500 break; |
| 1519 case kPPC_LoadWordS32: | 1501 case kPPC_LoadWordS32: |
| 1520 ASSEMBLE_LOAD_INTEGER(lwa, lwax); | 1502 ASSEMBLE_LOAD_INTEGER(lwa, lwax); |
| 1521 break; | 1503 break; |
| 1522 #if V8_TARGET_ARCH_PPC64 | 1504 #if V8_TARGET_ARCH_PPC64 |
| 1523 case kPPC_LoadWord64: | 1505 case kPPC_LoadWord64: |
| 1524 ASSEMBLE_LOAD_INTEGER(ld, ldx); | 1506 ASSEMBLE_LOAD_INTEGER(ld, ldx); |
| 1525 break; | 1507 break; |
| 1526 #endif | 1508 #endif |
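Note: the new kPPC_LoadWordU32 case uses the zero-extending lwz/lwzx, while kPPC_LoadWordS32 keeps the sign-extending lwa/lwax; the checked and atomic Word32 loads further down switch to lwz/lwzx as well. The contrast, in C++ stand-ins (sketch only):

    #include <cstdint>

    uint64_t LwzLike(uint32_t mem) { return mem; }                        // zero-extend
    int64_t  LwaLike(uint32_t mem) { return static_cast<int32_t>(mem); }  // sign-extend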
| 1527 case kPPC_LoadFloat32: | 1509 case kPPC_LoadFloat32: |
| 1528 ASSEMBLE_LOAD_FLOAT(lfs, lfsx); | 1510 ASSEMBLE_LOAD_FLOAT(lfs, lfsx); |
| (...skipping 28 matching lines...) |
| 1557 case kCheckedLoadUint8: | 1539 case kCheckedLoadUint8: |
| 1558 ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx); | 1540 ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx); |
| 1559 break; | 1541 break; |
| 1560 case kCheckedLoadInt16: | 1542 case kCheckedLoadInt16: |
| 1561 ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax); | 1543 ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax); |
| 1562 break; | 1544 break; |
| 1563 case kCheckedLoadUint16: | 1545 case kCheckedLoadUint16: |
| 1564 ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx); | 1546 ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx); |
| 1565 break; | 1547 break; |
| 1566 case kCheckedLoadWord32: | 1548 case kCheckedLoadWord32: |
| 1567 ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax); | 1549 ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx); |
| 1568 break; | 1550 break; |
| 1569 case kCheckedLoadWord64: | 1551 case kCheckedLoadWord64: |
| 1570 #if V8_TARGET_ARCH_PPC64 | 1552 #if V8_TARGET_ARCH_PPC64 |
| 1571 ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx); | 1553 ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx); |
| 1572 #else | 1554 #else |
| 1573 UNREACHABLE(); | 1555 UNREACHABLE(); |
| 1574 #endif | 1556 #endif |
| 1575 break; | 1557 break; |
| 1576 case kCheckedLoadFloat32: | 1558 case kCheckedLoadFloat32: |
| 1577 ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32); | 1559 ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32); |
| (...skipping 31 matching lines...) |
| 1609 case kAtomicLoadUint8: | 1591 case kAtomicLoadUint8: |
| 1610 ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx); | 1592 ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx); |
| 1611 break; | 1593 break; |
| 1612 case kAtomicLoadInt16: | 1594 case kAtomicLoadInt16: |
| 1613 ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax); | 1595 ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax); |
| 1614 break; | 1596 break; |
| 1615 case kAtomicLoadUint16: | 1597 case kAtomicLoadUint16: |
| 1616 ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx); | 1598 ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx); |
| 1617 break; | 1599 break; |
| 1618 case kAtomicLoadWord32: | 1600 case kAtomicLoadWord32: |
| 1619 ASSEMBLE_ATOMIC_LOAD_INTEGER(lwa, lwax); | 1601 ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx); |
| 1620 break; | 1602 break; |
| 1621 default: | 1603 default: |
| 1622 UNREACHABLE(); | 1604 UNREACHABLE(); |
| 1623 break; | 1605 break; |
| 1624 } | 1606 } |
| 1625 } // NOLINT(readability/fn_size) | 1607 } // NOLINT(readability/fn_size) |
| 1626 | 1608 |
| 1627 | 1609 |
| 1628 // Assembles branches after an instruction. | 1610 // Assembles branches after an instruction. |
| 1629 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { | 1611 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { |
| (...skipping 80 matching lines...) |
| 1710 __ li(reg, Operand(1)); | 1692 __ li(reg, Operand(1)); |
| 1711 } | 1693 } |
| 1712 __ bind(&done); | 1694 __ bind(&done); |
| 1713 } | 1695 } |
| 1714 | 1696 |
| 1715 | 1697 |
| 1716 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { | 1698 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { |
| 1717 PPCOperandConverter i(this, instr); | 1699 PPCOperandConverter i(this, instr); |
| 1718 Register input = i.InputRegister(0); | 1700 Register input = i.InputRegister(0); |
| 1719 for (size_t index = 2; index < instr->InputCount(); index += 2) { | 1701 for (size_t index = 2; index < instr->InputCount(); index += 2) { |
| 1720 __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0); | 1702 __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0); |
| 1721 __ beq(GetLabel(i.InputRpo(index + 1))); | 1703 __ beq(GetLabel(i.InputRpo(index + 1))); |
| 1722 } | 1704 } |
| 1723 AssembleArchJump(i.InputRpo(1)); | 1705 AssembleArchJump(i.InputRpo(1)); |
| 1724 } | 1706 } |
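Note: Cmpi on PPC64 compares the full 64-bit register, whereas Cmpwi compares only the low word, which matches the int32 case values a lookup switch carries; stale upper bits in the input register can no longer cause a spurious mismatch. The equivalent predicate, as a sketch (hedged C++; the name is hypothetical):

    #include <cstdint>

    // Signed compare of the low 32 bits only, as cmpwi performs it.
    bool CaseMatches(uint64_t input_reg, int32_t case_value) {
      return static_cast<int32_t>(input_reg) == case_value;
    }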
| 1725 | 1707 |
| 1726 | 1708 |
| 1727 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { | 1709 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { |
| 1728 PPCOperandConverter i(this, instr); | 1710 PPCOperandConverter i(this, instr); |
| 1729 Register input = i.InputRegister(0); | 1711 Register input = i.InputRegister(0); |
| 1730 int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2); | 1712 int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2); |
| (...skipping 363 matching lines...) |
| 2094 padding_size -= v8::internal::Assembler::kInstrSize; | 2076 padding_size -= v8::internal::Assembler::kInstrSize; |
| 2095 } | 2077 } |
| 2096 } | 2078 } |
| 2097 } | 2079 } |
| 2098 | 2080 |
| 2099 #undef __ | 2081 #undef __ |
| 2100 | 2082 |
| 2101 } // namespace compiler | 2083 } // namespace compiler |
| 2102 } // namespace internal | 2084 } // namespace internal |
| 2103 } // namespace v8 | 2085 } // namespace v8 |