| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 503 matching lines...) |
| 514 // Compute lower part of fraction (last 12 bits). | 514 // Compute lower part of fraction (last 12 bits). |
| 515 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord); | 515 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord); |
| 516 // And the top (top 20 bits). | 516 // And the top (top 20 bits). |
| 517 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord); | 517 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord); |
| 518 | 518 |
| 519 __ Ret(USE_DELAY_SLOT); | 519 __ Ret(USE_DELAY_SLOT); |
| 520 __ or_(exponent, exponent, source_); | 520 __ or_(exponent, exponent, source_); |
| 521 } | 521 } |
| 522 | 522 |
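The tail of WriteInt32ToHeapNumber above splits the integer's bits across the two words of an IEEE-754 double. A minimal host-side sketch of that split, assuming HeapNumber::kMantissaBitsInTopWord == 20 (52-bit mantissa: 20 bits in the high word, 32 in the low word); the helper name is illustrative, not V8 API:

```
#include <cstdint>

// Mirrors the sll/srl pair above: the low 12 bits of the value end up at
// the top of the double's low mantissa word, and the top 20 bits go into
// the word holding sign, exponent, and the mantissa's upper part.
void SplitMantissaWords(uint32_t source, uint32_t* mantissa_low_word,
                        uint32_t* mantissa_top_bits) {
  const int kMantissaBitsInTopWord = 20;  // assumed value
  *mantissa_low_word = source << kMantissaBitsInTopWord;         // last 12 bits
  *mantissa_top_bits = source >> (32 - kMantissaBitsInTopWord);  // top 20 bits
}
```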
| 523 | 523 |
| 524 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | |
| 525 FloatingPointHelper::Destination destination, | |
| 526 Register scratch1, | |
| 527 Register scratch2) { | |
| 528 __ sra(scratch1, a0, kSmiTagSize); | |
| 529 __ mtc1(scratch1, f14); | |
| 530 __ cvt_d_w(f14, f14); | |
| 531 __ sra(scratch1, a1, kSmiTagSize); | |
| 532 __ mtc1(scratch1, f12); | |
| 533 __ cvt_d_w(f12, f12); | |
| 534 if (destination == kCoreRegisters) { | |
| 535 __ Move(a2, a3, f14); | |
| 536 __ Move(a0, a1, f12); | |
| 537 } | |
| 538 } | |
| 539 | |
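LoadSmis untags each smi with an arithmetic right shift, then converts the resulting 32-bit integer to a double with mtc1/cvt.d.w. A hedged per-operand equivalent, assuming the 32-bit smi encoding with kSmiTagSize == 1 and two's-complement arithmetic shifts:

```
#include <cstdint>

double SmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;    // sra(scratch, reg, kSmiTagSize)
  return static_cast<double>(untagged);  // mtc1 + cvt.d.w
}
```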
| 540 | |
| 541 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | |
| 542 Destination destination, | |
| 543 Register object, | |
| 544 FPURegister dst, | |
| 545 Register dst1, | |
| 546 Register dst2, | |
| 547 Register heap_number_map, | |
| 548 Register scratch1, | |
| 549 Register scratch2, | |
| 550 Label* not_number) { | |
| 551 __ AssertRootValue(heap_number_map, | |
| 552 Heap::kHeapNumberMapRootIndex, | |
| 553 kHeapNumberMapRegisterClobbered); | |
| 554 | |
| 555 Label is_smi, done; | |
| 556 | |
| 557 // Smi-check | |
| 558 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | |
| 559 // Heap number check | |
| 560 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | |
| 561 | |
| 562 // Handle loading a double from a heap number. | |
| 563 if (destination == kFPURegisters) { | |
| 564 // Load the double from tagged HeapNumber to double register. | |
| 565 | |
| 566 // ARM uses a workaround here because of the unaligned HeapNumber | |
| 567 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no | |
| 568 // point in generating even more instructions. | |
| 569 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 570 } else { | |
| 571 ASSERT(destination == kCoreRegisters); | |
| 572 // Load the double from heap number to dst1 and dst2 in double format. | |
| 573 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 574 __ lw(dst2, FieldMemOperand(object, | |
| 575 HeapNumber::kValueOffset + kPointerSize)); | |
| 576 } | |
| 577 __ Branch(&done); | |
| 578 | |
| 579 // Handle loading a double from a smi. | |
| 580 __ bind(&is_smi); | |
| 581 // Convert smi to double using FPU instructions. | |
| 582 __ mtc1(scratch1, dst); | |
| 583 __ cvt_d_w(dst, dst); | |
| 584 if (destination == kCoreRegisters) { | |
| 585 // Load the converted smi to dst1 and dst2 in double format. | |
| 586 __ Move(dst1, dst2, dst); | |
| 587 } | |
| 588 __ bind(&done); | |
| 589 } | |
| 590 | |
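LoadNumber dispatches on the pointer tag and then reads the heap number's payload at kValueOffset through FieldMemOperand, which folds the heap-object tag into the displacement. A sketch of both ideas, assuming the classic 32-bit tagging scheme (low bit 0 = smi, low bit 1 = heap object) and kHeapObjectTag == 1; the helpers are illustrative, not V8 API:

```
#include <cstdint>

bool IsSmi(uint32_t tagged) { return (tagged & 1u) == 0; }

// FieldMemOperand(obj, offset) addresses obj + offset - kHeapObjectTag, so
// tagged pointers can be dereferenced without stripping the tag first.
uint32_t FieldAddress(uint32_t tagged_object, uint32_t field_offset) {
  const uint32_t kHeapObjectTag = 1;  // assumed
  return tagged_object + field_offset - kHeapObjectTag;
}
```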
| 591 | |
| 592 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | |
| 593 Register object, | |
| 594 Register dst, | |
| 595 Register heap_number_map, | |
| 596 Register scratch1, | |
| 597 Register scratch2, | |
| 598 Register scratch3, | |
| 599 FPURegister double_scratch, | |
| 600 Label* not_number) { | |
| 601 __ AssertRootValue(heap_number_map, | |
| 602 Heap::kHeapNumberMapRootIndex, | |
| 603 kHeapNumberMapRegisterClobbered); | |
| 604 Label done; | |
| 605 Label not_in_int32_range; | |
| 606 | |
| 607 __ UntagAndJumpIfSmi(dst, object, &done); | |
| 608 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); | |
| 609 __ Branch(not_number, ne, scratch1, Operand(heap_number_map)); | |
| 610 __ ConvertToInt32(object, | |
| 611 dst, | |
| 612 scratch1, | |
| 613 scratch2, | |
| 614 double_scratch, | |
| 615 &not_in_int32_range); | |
| 616 __ jmp(&done); | |
| 617 | |
| 618 __ bind(&not_in_int32_range); | |
| 619 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 620 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
| 621 | |
| 622 __ EmitOutOfInt32RangeTruncate(dst, | |
| 623 scratch1, | |
| 624 scratch2, | |
| 625 scratch3); | |
| 626 | |
| 627 __ bind(&done); | |
| 628 } | |
| 629 | |
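ConvertNumberToInt32 implements ECMA-262 ToInt32: in-range doubles truncate directly, while out-of-range values go through EmitOutOfInt32RangeTruncate, which wraps them modulo 2^32 from the raw exponent and mantissa words. A sketch of the value-level semantics (not the stub's bit manipulation):

```
#include <cmath>
#include <cstdint>

int32_t ToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;       // NaN and Inf map to 0
  double m = std::fmod(std::trunc(d), 4294967296.0);  // wrap modulo 2^32
  if (m < 0) m += 4294967296.0;                       // normalize to [0, 2^32)
  // Reinterpret the low 32 bits as a signed (two's-complement) value.
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
```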
| 630 | |
| 631 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | |
| 632 Register int_scratch, | |
| 633 Destination destination, | |
| 634 FPURegister double_dst, | |
| 635 Register dst_mantissa, | |
| 636 Register dst_exponent, | |
| 637 Register scratch2, | |
| 638 FPURegister single_scratch) { | |
| 639 ASSERT(!int_scratch.is(scratch2)); | |
| 640 ASSERT(!int_scratch.is(dst_mantissa)); | |
| 641 ASSERT(!int_scratch.is(dst_exponent)); | |
| 642 | |
| 643 __ mtc1(int_scratch, single_scratch); | |
| 644 __ cvt_d_w(double_dst, single_scratch); | |
| 645 if (destination == kCoreRegisters) { | |
| 646 __ Move(dst_mantissa, dst_exponent, double_dst); | |
| 647 } | |
| 648 } | |
| 649 | |
| 650 | |
| 651 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | |
| 652 Register object, | |
| 653 Destination destination, | |
| 654 DoubleRegister double_dst, | |
| 655 DoubleRegister double_scratch, | |
| 656 Register dst_mantissa, | |
| 657 Register dst_exponent, | |
| 658 Register heap_number_map, | |
| 659 Register scratch1, | |
| 660 Register scratch2, | |
| 661 FPURegister single_scratch, | |
| 662 Label* not_int32) { | |
| 663 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
| 664 ASSERT(!scratch1.is(scratch2)); | |
| 665 ASSERT(!heap_number_map.is(object) && | |
| 666 !heap_number_map.is(scratch1) && | |
| 667 !heap_number_map.is(scratch2)); | |
| 668 | |
| 669 Label done, obj_is_not_smi; | |
| 670 | |
| 671 __ JumpIfNotSmi(object, &obj_is_not_smi); | |
| 672 __ SmiUntag(scratch1, object); | |
| 673 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, | |
| 674 dst_exponent, scratch2, single_scratch); | |
| 675 __ Branch(&done); | |
| 676 | |
| 677 __ bind(&obj_is_not_smi); | |
| 678 __ AssertRootValue(heap_number_map, | |
| 679 Heap::kHeapNumberMapRootIndex, | |
| 680 kHeapNumberMapRegisterClobbered); | |
| 681 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | |
| 682 | |
| 683 // Object is a heap number. | |
| 684 // Load its double value. | |
| 685 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 686 | |
| 687 Register except_flag = scratch2; | |
| 688 __ EmitFPUTruncate(kRoundToZero, | |
| 689 scratch1, | |
| 690 double_dst, | |
| 691 at, | |
| 692 double_scratch, | |
| 693 except_flag, | |
| 694 kCheckForInexactConversion); | |
| 695 | |
| 696 // Jump to not_int32 if the operation did not succeed. | |
| 697 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); | |
| 698 if (destination == kCoreRegisters) { | |
| 699 __ Move(dst_mantissa, dst_exponent, double_dst); | |
| 700 } | |
| 701 __ bind(&done); | |
| 702 } | |
| 703 | |
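LoadNumberAsInt32Double succeeds only when the double is exactly representable as an int32: EmitFPUTruncate rounds toward zero with kCheckForInexactConversion, and any inexact or out-of-range conversion sets except_flag, which branches to not_int32. A sketch of the equivalent predicate (hypothetical helper, not V8 API):

```
#include <cstdint>

bool IsInt32Double(double d, int32_t* out) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // range check
  int32_t i = static_cast<int32_t>(d);            // truncates toward zero
  if (static_cast<double>(i) != d) return false;  // conversion was inexact
  *out = i;
  return true;
}
```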
| 704 | |
| 705 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | |
| 706 Register object, | |
| 707 Register dst, | |
| 708 Register heap_number_map, | |
| 709 Register scratch1, | |
| 710 Register scratch2, | |
| 711 Register scratch3, | |
| 712 DoubleRegister double_scratch0, | |
| 713 DoubleRegister double_scratch1, | |
| 714 Label* not_int32) { | |
| 715 ASSERT(!dst.is(object)); | |
| 716 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | |
| 717 ASSERT(!scratch1.is(scratch2) && | |
| 718 !scratch1.is(scratch3) && | |
| 719 !scratch2.is(scratch3)); | |
| 720 | |
| 721 Label done, maybe_undefined; | |
| 722 | |
| 723 __ UntagAndJumpIfSmi(dst, object, &done); | |
| 724 | |
| 725 __ AssertRootValue(heap_number_map, | |
| 726 Heap::kHeapNumberMapRootIndex, | |
| 727 kHeapNumberMapRegisterClobbered); | |
| 728 | |
| 729 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | |
| 730 | |
| 731 // Object is a heap number. | |
| 732 // Convert the floating point value to a 32-bit integer. | |
| 733 // Load the double value. | |
| 734 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 735 | |
| 736 Register except_flag = scratch2; | |
| 737 __ EmitFPUTruncate(kRoundToZero, | |
| 738 dst, | |
| 739 double_scratch0, | |
| 740 scratch1, | |
| 741 double_scratch1, | |
| 742 except_flag, | |
| 743 kCheckForInexactConversion); | |
| 744 | |
| 745 // Jump to not_int32 if the operation did not succeed. | |
| 746 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); | |
| 747 __ Branch(&done); | |
| 748 | |
| 749 __ bind(&maybe_undefined); | |
| 750 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
| 751 __ Branch(not_int32, ne, object, Operand(at)); | |
| 752 // |undefined| is truncated to 0. | |
| 753 __ li(dst, Operand(Smi::FromInt(0))); | |
| 754 // Fall through. | |
| 755 | |
| 756 __ bind(&done); | |
| 757 } | |
| 758 | |
| 759 | |
| 760 void FloatingPointHelper::CallCCodeForDoubleOperation( | |
| 761 MacroAssembler* masm, | |
| 762 Token::Value op, | |
| 763 Register heap_number_result, | |
| 764 Register scratch) { | |
| 765 // Using core registers: | |
| 766 // a0: Left value (least significant part of mantissa). | |
| 767 // a1: Left value (sign, exponent, top of mantissa). | |
| 768 // a2: Right value (least significant part of mantissa). | |
| 769 // a3: Right value (sign, exponent, top of mantissa). | |
| 770 | |
| 771 // Assert that heap_number_result is saved. | |
| 772 // We currently always use s0 to pass it. | |
| 773 ASSERT(heap_number_result.is(s0)); | |
| 774 | |
| 775 // Push the current return address before the C call. | |
| 776 __ push(ra); | |
| 777 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | |
| 778 if (!IsMipsSoftFloatABI) { | |
| 779 // This stub does not itself use FPU instructions: the parameters for | |
| 780 // the runtime call are prepared in the a0-a3 core registers, but the | |
| 781 // function we are calling is compiled with the hard-float flag and | |
| 782 // expects the hard-float ABI (parameters in f12/f14 registers). We | |
| 783 // therefore copy the parameters from a0-a3 to the f12/f14 pairs. | |
| 784 __ Move(f12, a0, a1); | |
| 785 __ Move(f14, a2, a3); | |
| 786 } | |
| 787 { | |
| 788 AllowExternalCallThatCantCauseGC scope(masm); | |
| 789 __ CallCFunction( | |
| 790 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | |
| 791 } | |
| 792 // Store answer in the overwritable heap number. | |
| 793 if (!IsMipsSoftFloatABI) { | |
| 794 // Double returned in register f0. | |
| 795 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
| 796 } else { | |
| 797 // Double returned in registers v0 and v1. | |
| 798 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); | |
| 799 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); | |
| 800 } | |
| 801 // Place heap_number_result in v0 and return to the pushed return address. | |
| 802 __ pop(ra); | |
| 803 __ Ret(USE_DELAY_SLOT); | |
| 804 __ mov(v0, heap_number_result); | |
| 805 } | |
| 806 | |
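Under the soft-float ABI a double travels as two 32-bit words: the low mantissa word and the word holding sign, exponent, and the mantissa top. The __ Move(f12, a0, a1) calls above just shuffle those word pairs into FPU registers. A sketch of the reassembly on a little-endian host (hypothetical helper):

```
#include <cstdint>
#include <cstring>

double WordsToDouble(uint32_t mantissa_low, uint32_t sign_exponent_high) {
  uint64_t bits =
      (static_cast<uint64_t>(sign_exponent_high) << 32) | mantissa_low;
  double d;
  std::memcpy(&d, &bits, sizeof(d));  // bit-for-bit reinterpretation
  return d;
}
```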
| 807 | |
| 808 bool WriteInt32ToHeapNumberStub::IsPregenerated() { | 524 bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
| 809 // These variants are compiled ahead of time. See next method. | 525 // These variants are compiled ahead of time. See next method. |
| 810 if (the_int_.is(a1) && | 526 if (the_int_.is(a1) && |
| 811 the_heap_number_.is(v0) && | 527 the_heap_number_.is(v0) && |
| 812 scratch_.is(a2) && | 528 scratch_.is(a2) && |
| 813 sign_.is(a3)) { | 529 sign_.is(a3)) { |
| 814 return true; | 530 return true; |
| 815 } | 531 } |
| 816 if (the_int_.is(a2) && | 532 if (the_int_.is(a2) && |
| 817 the_heap_number_.is(v0) && | 533 the_heap_number_.is(v0) && |
| (...skipping 647 matching lines...) |
| 1465 argument_count); | 1181 argument_count); |
| 1466 if (save_doubles_ == kSaveFPRegs) { | 1182 if (save_doubles_ == kSaveFPRegs) { |
| 1467 __ MultiPopFPU(kCallerSavedFPU); | 1183 __ MultiPopFPU(kCallerSavedFPU); |
| 1468 } | 1184 } |
| 1469 | 1185 |
| 1470 __ MultiPop(kJSCallerSaved | ra.bit()); | 1186 __ MultiPop(kJSCallerSaved | ra.bit()); |
| 1471 __ Ret(); | 1187 __ Ret(); |
| 1472 } | 1188 } |
| 1473 | 1189 |
| 1474 | 1190 |
| 1191 // Generates code to call a C function to do a double operation. |
| 1192 // This code never falls through; it returns with v0 holding the heap |
| 1193 // number that contains the result. |
| 1194 // Register heap_number_result must hold the heap number in which the |
| 1195 // result of the operation will be stored. |
| 1196 // Requires the following layout on entry: |
| 1197 // a0: Left value (least significant part of mantissa). |
| 1198 // a1: Left value (sign, exponent, top of mantissa). |
| 1199 // a2: Right value (least significant part of mantissa). |
| 1200 // a3: Right value (sign, exponent, top of mantissa). |
| 1201 static void CallCCodeForDoubleOperation(MacroAssembler* masm, |
| 1202 Token::Value op, |
| 1203 Register heap_number_result, |
| 1204 Register scratch) { |
| 1205 // Assert that heap_number_result is saved. |
| 1206 // We currently always use s0 to pass it. |
| 1207 ASSERT(heap_number_result.is(s0)); |
| 1208 |
| 1209 // Push the current return address before the C call. |
| 1210 __ push(ra); |
| 1211 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
| 1212 { |
| 1213 AllowExternalCallThatCantCauseGC scope(masm); |
| 1214 __ CallCFunction( |
| 1215 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
| 1216 } |
| 1217 // Store answer in the overwritable heap number. |
| 1218 // Double returned in register f0. |
| 1219 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| 1220 // Place heap_number_result in v0 and return to the pushed return address. |
| 1221 __ pop(ra); |
| 1222 __ Ret(USE_DELAY_SLOT); |
| 1223 __ mov(v0, heap_number_result); |
| 1224 } |
| 1225 |
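In the new code this C call is reached only for Token::MOD (ADD/SUB/MUL/DIV stay on the FPU), since MIPS has no double-precision modulo instruction. For MOD, the runtime function behind ExternalReference::double_fp_operation boils down to fmod semantics; a sketch, not the actual runtime entry:

```
#include <cmath>

// JavaScript's % on numbers is the truncated-division remainder whose
// sign follows the dividend, which is what std::fmod computes.
double DoubleMod(double left, double right) {
  return std::fmod(left, right);
}
```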
| 1226 |
| 1475 void BinaryOpStub::Initialize() { | 1227 void BinaryOpStub::Initialize() { |
| 1476 platform_specific_bit_ = true; // FPU is a base requirement for V8. | 1228 platform_specific_bit_ = true; // FPU is a base requirement for V8. |
| 1477 } | 1229 } |
| 1478 | 1230 |
| 1479 | 1231 |
| 1480 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 1232 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 1481 Label get_result; | 1233 Label get_result; |
| 1482 | 1234 |
| 1483 __ Push(a1, a0); | 1235 __ Push(a1, a0); |
| 1484 | 1236 |
| (...skipping 207 matching lines...) |
| 1692 | 1444 |
| 1693 Register heap_number_map = t2; | 1445 Register heap_number_map = t2; |
| 1694 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1446 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1695 | 1447 |
| 1696 switch (op) { | 1448 switch (op) { |
| 1697 case Token::ADD: | 1449 case Token::ADD: |
| 1698 case Token::SUB: | 1450 case Token::SUB: |
| 1699 case Token::MUL: | 1451 case Token::MUL: |
| 1700 case Token::DIV: | 1452 case Token::DIV: |
| 1701 case Token::MOD: { | 1453 case Token::MOD: { |
| 1702 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 | |
| 1703 // depending on operation. | |
| 1704 FloatingPointHelper::Destination destination = | |
| 1705 op != Token::MOD ? | |
| 1706 FloatingPointHelper::kFPURegisters : | |
| 1707 FloatingPointHelper::kCoreRegisters; | |
| 1708 | |
| 1709 // Allocate new heap number for result. | 1454 // Allocate new heap number for result. |
| 1710 Register result = s0; | 1455 Register result = s0; |
| 1711 BinaryOpStub_GenerateHeapResultAllocation( | 1456 BinaryOpStub_GenerateHeapResultAllocation( |
| 1712 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1457 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| 1713 | 1458 |
| 1714 // Load the operands. | 1459 // Load left and right operands into f12 and f14. |
| 1715 if (smi_operands) { | 1460 if (smi_operands) { |
| 1716 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 1461 __ SmiUntag(scratch1, a0); |
| 1462 __ mtc1(scratch1, f14); |
| 1463 __ cvt_d_w(f14, f14); |
| 1464 __ SmiUntag(scratch1, a1); |
| 1465 __ mtc1(scratch1, f12); |
| 1466 __ cvt_d_w(f12, f12); |
| 1717 } else { | 1467 } else { |
| 1718 // Load right operand to f14 or a2/a3. | 1468 // Load right operand to f14. |
| 1719 if (right_type == BinaryOpIC::INT32) { | 1469 if (right_type == BinaryOpIC::INT32) { |
| 1720 FloatingPointHelper::LoadNumberAsInt32Double( | 1470 __ LoadNumberAsInt32Double( |
| 1721 masm, right, destination, f14, f16, a2, a3, heap_number_map, | 1471 right, f14, heap_number_map, scratch1, scratch2, f2, miss); |
| 1722 scratch1, scratch2, f2, miss); | |
| 1723 } else { | 1472 } else { |
| 1724 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | 1473 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| 1725 FloatingPointHelper::LoadNumber( | 1474 __ LoadNumber(right, f14, heap_number_map, scratch1, fail); |
| 1726 masm, destination, right, f14, a2, a3, heap_number_map, | |
| 1727 scratch1, scratch2, fail); | |
| 1728 } | 1475 } |
| 1729 // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it | 1476 // Load left operand to f12. This keeps a0/a1 intact if it |
| 1730 // jumps to |miss|. | 1477 // jumps to |miss|. |
| 1731 if (left_type == BinaryOpIC::INT32) { | 1478 if (left_type == BinaryOpIC::INT32) { |
| 1732 FloatingPointHelper::LoadNumberAsInt32Double( | 1479 __ LoadNumberAsInt32Double( |
| 1733 masm, left, destination, f12, f16, a0, a1, heap_number_map, | 1480 left, f12, heap_number_map, scratch1, scratch2, f2, miss); |
| 1734 scratch1, scratch2, f2, miss); | |
| 1735 } else { | 1481 } else { |
| 1736 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | 1482 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| 1737 FloatingPointHelper::LoadNumber( | 1483 __ LoadNumber(left, f12, heap_number_map, scratch1, fail); |
| 1738 masm, destination, left, f12, a0, a1, heap_number_map, | |
| 1739 scratch1, scratch2, fail); | |
| 1740 } | 1484 } |
| 1741 } | 1485 } |
| 1742 | 1486 |
| 1743 // Calculate the result. | 1487 // Calculate the result. |
| 1744 if (destination == FloatingPointHelper::kFPURegisters) { | 1488 if (op != Token::MOD) { |
| 1745 // Using FPU registers: | 1489 // Using FPU registers: |
| 1746 // f12: Left value. | 1490 // f12: Left value. |
| 1747 // f14: Right value. | 1491 // f14: Right value. |
| 1748 switch (op) { | 1492 switch (op) { |
| 1749 case Token::ADD: | 1493 case Token::ADD: |
| 1750 __ add_d(f10, f12, f14); | 1494 __ add_d(f10, f12, f14); |
| 1751 break; | 1495 break; |
| 1752 case Token::SUB: | 1496 case Token::SUB: |
| 1753 __ sub_d(f10, f12, f14); | 1497 __ sub_d(f10, f12, f14); |
| 1754 break; | 1498 break; |
| 1755 case Token::MUL: | 1499 case Token::MUL: |
| 1756 __ mul_d(f10, f12, f14); | 1500 __ mul_d(f10, f12, f14); |
| 1757 break; | 1501 break; |
| 1758 case Token::DIV: | 1502 case Token::DIV: |
| 1759 __ div_d(f10, f12, f14); | 1503 __ div_d(f10, f12, f14); |
| 1760 break; | 1504 break; |
| 1761 default: | 1505 default: |
| 1762 UNREACHABLE(); | 1506 UNREACHABLE(); |
| 1763 } | 1507 } |
| 1764 | 1508 |
| 1765 // ARM uses a workaround here because of the unaligned HeapNumber | 1509 // ARM uses a workaround here because of the unaligned HeapNumber |
| 1766 // kValueOffset. On MIPS this workaround is built into sdc1 so | 1510 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 1767 // there's no point in generating even more instructions. | 1511 // there's no point in generating even more instructions. |
| 1768 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); | 1512 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 1769 __ Ret(USE_DELAY_SLOT); | 1513 __ Ret(USE_DELAY_SLOT); |
| 1770 __ mov(v0, result); | 1514 __ mov(v0, result); |
| 1771 } else { | 1515 } else { |
| 1772 // Call the C function to handle the double operation. | 1516 // Call the C function to handle the double operation. |
| 1773 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 1517 CallCCodeForDoubleOperation(masm, op, result, scratch1); |
| 1774 op, | |
| 1775 result, | |
| 1776 scratch1); | |
| 1777 if (FLAG_debug_code) { | 1518 if (FLAG_debug_code) { |
| 1778 __ stop("Unreachable code."); | 1519 __ stop("Unreachable code."); |
| 1779 } | 1520 } |
| 1780 } | 1521 } |
| 1781 break; | 1522 break; |
| 1782 } | 1523 } |
| 1783 case Token::BIT_OR: | 1524 case Token::BIT_OR: |
| 1784 case Token::BIT_XOR: | 1525 case Token::BIT_XOR: |
| 1785 case Token::BIT_AND: | 1526 case Token::BIT_AND: |
| 1786 case Token::SAR: | 1527 case Token::SAR: |
| 1787 case Token::SHR: | 1528 case Token::SHR: |
| 1788 case Token::SHL: { | 1529 case Token::SHL: { |
| 1789 if (smi_operands) { | 1530 if (smi_operands) { |
| 1790 __ SmiUntag(a3, left); | 1531 __ SmiUntag(a3, left); |
| 1791 __ SmiUntag(a2, right); | 1532 __ SmiUntag(a2, right); |
| 1792 } else { | 1533 } else { |
| 1793 // Convert operands to 32-bit integers. Right in a2 and left in a3. | 1534 // Convert operands to 32-bit integers. Right in a2 and left in a3. |
| 1794 FloatingPointHelper::ConvertNumberToInt32(masm, | 1535 __ ConvertNumberToInt32( |
| 1795 left, | 1536 left, a3, heap_number_map, |
| 1796 a3, | 1537 scratch1, scratch2, scratch3, f0, not_numbers); |
| 1797 heap_number_map, | 1538 __ ConvertNumberToInt32( |
| 1798 scratch1, | 1539 right, a2, heap_number_map, |
| 1799 scratch2, | 1540 scratch1, scratch2, scratch3, f0, not_numbers); |
| 1800 scratch3, | |
| 1801 f0, | |
| 1802 not_numbers); | |
| 1803 FloatingPointHelper::ConvertNumberToInt32(masm, | |
| 1804 right, | |
| 1805 a2, | |
| 1806 heap_number_map, | |
| 1807 scratch1, | |
| 1808 scratch2, | |
| 1809 scratch3, | |
| 1810 f0, | |
| 1811 not_numbers); | |
| 1812 } | 1541 } |
| 1813 Label result_not_a_smi; | 1542 Label result_not_a_smi; |
| 1814 switch (op) { | 1543 switch (op) { |
| 1815 case Token::BIT_OR: | 1544 case Token::BIT_OR: |
| 1816 __ Or(a2, a3, Operand(a2)); | 1545 __ Or(a2, a3, Operand(a2)); |
| 1817 break; | 1546 break; |
| 1818 case Token::BIT_XOR: | 1547 case Token::BIT_XOR: |
| 1819 __ Xor(a2, a3, Operand(a2)); | 1548 __ Xor(a2, a3, Operand(a2)); |
| 1820 break; | 1549 break; |
| 1821 case Token::BIT_AND: | 1550 case Token::BIT_AND: |
| (...skipping 213 matching lines...) |
| 2035 // again if this changes. | 1764 // again if this changes. |
| 2036 if (left_type_ == BinaryOpIC::SMI) { | 1765 if (left_type_ == BinaryOpIC::SMI) { |
| 2037 __ JumpIfNotSmi(left, &transition); | 1766 __ JumpIfNotSmi(left, &transition); |
| 2038 } | 1767 } |
| 2039 if (right_type_ == BinaryOpIC::SMI) { | 1768 if (right_type_ == BinaryOpIC::SMI) { |
| 2040 __ JumpIfNotSmi(right, &transition); | 1769 __ JumpIfNotSmi(right, &transition); |
| 2041 } | 1770 } |
| 2042 // Load both operands and check that they are 32-bit integers. | 1771 // Load both operands and check that they are 32-bit integers. |
| 2043 // Jump to type transition if they are not. The registers a0 and a1 (right | 1772 // Jump to type transition if they are not. The registers a0 and a1 (right |
| 2044 // and left) are preserved for the runtime call. | 1773 // and left) are preserved for the runtime call. |
| 2045 FloatingPointHelper::Destination destination = (op_ != Token::MOD) | |
| 2046 ? FloatingPointHelper::kFPURegisters | |
| 2047 : FloatingPointHelper::kCoreRegisters; | |
| 2048 | 1774 |
| 2049 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 1775 __ LoadNumberAsInt32Double( |
| 2050 right, | 1776 right, f14, heap_number_map, scratch1, scratch2, f2, &transition); |
| 2051 destination, | 1777 __ LoadNumberAsInt32Double( |
| 2052 f14, | 1778 left, f12, heap_number_map, scratch1, scratch2, f2, &transition); |
| 2053 f16, | |
| 2054 a2, | |
| 2055 a3, | |
| 2056 heap_number_map, | |
| 2057 scratch1, | |
| 2058 scratch2, | |
| 2059 f2, | |
| 2060 &transition); | |
| 2061 FloatingPointHelper::LoadNumberAsInt32Double(masm, | |
| 2062 left, | |
| 2063 destination, | |
| 2064 f12, | |
| 2065 f16, | |
| 2066 t0, | |
| 2067 t1, | |
| 2068 heap_number_map, | |
| 2069 scratch1, | |
| 2070 scratch2, | |
| 2071 f2, | |
| 2072 &transition); | |
| 2073 | 1779 |
| 2074 if (destination == FloatingPointHelper::kFPURegisters) { | 1780 if (op_ != Token::MOD) { |
| 2075 Label return_heap_number; | 1781 Label return_heap_number; |
| 2076 switch (op_) { | 1782 switch (op_) { |
| 2077 case Token::ADD: | 1783 case Token::ADD: |
| 2078 __ add_d(f10, f12, f14); | 1784 __ add_d(f10, f12, f14); |
| 2079 break; | 1785 break; |
| 2080 case Token::SUB: | 1786 case Token::SUB: |
| 2081 __ sub_d(f10, f12, f14); | 1787 __ sub_d(f10, f12, f14); |
| 2082 break; | 1788 break; |
| 2083 case Token::MUL: | 1789 case Token::MUL: |
| 2084 __ mul_d(f10, f12, f14); | 1790 __ mul_d(f10, f12, f14); |
| (...skipping 56 matching lines...) |
| 2141 | 1847 |
| 2142 // A DIV operation expecting an integer result falls through | 1848 // A DIV operation expecting an integer result falls through |
| 2143 // to type transition. | 1849 // to type transition. |
| 2144 | 1850 |
| 2145 } else { | 1851 } else { |
| 2146 if (encoded_right_arg_.has_value) { | 1852 if (encoded_right_arg_.has_value) { |
| 2147 __ Move(f16, fixed_right_arg_value()); | 1853 __ Move(f16, fixed_right_arg_value()); |
| 2148 __ BranchF(&transition, NULL, ne, f14, f16); | 1854 __ BranchF(&transition, NULL, ne, f14, f16); |
| 2149 } | 1855 } |
| 2150 | 1856 |
| 2151 // We preserved a0 and a1 to be able to call runtime. | |
| 2152 // Save the left value on the stack. | |
| 2153 __ Push(t1, t0); | |
| 2154 | |
| 2155 Label pop_and_call_runtime; | 1857 Label pop_and_call_runtime; |
| 2156 | 1858 |
| 2157 // Allocate a heap number to store the result. | 1859 // Allocate a heap number to store the result. |
| 2158 heap_number_result = s0; | 1860 heap_number_result = s0; |
| 2159 BinaryOpStub_GenerateHeapResultAllocation(masm, | 1861 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2160 heap_number_result, | 1862 heap_number_result, |
| 2161 heap_number_map, | 1863 heap_number_map, |
| 2162 scratch1, | 1864 scratch1, |
| 2163 scratch2, | 1865 scratch2, |
| 2164 &pop_and_call_runtime, | 1866 &pop_and_call_runtime, |
| 2165 mode_); | 1867 mode_); |
| 2166 | 1868 |
| 2167 // Load the left value from the value saved on the stack. | |
| 2168 __ Pop(a1, a0); | |
| 2169 | |
| 2170 // Call the C function to handle the double operation. | 1869 // Call the C function to handle the double operation. |
| 2171 FloatingPointHelper::CallCCodeForDoubleOperation( | 1870 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
| 2172 masm, op_, heap_number_result, scratch1); | |
| 2173 if (FLAG_debug_code) { | 1871 if (FLAG_debug_code) { |
| 2174 __ stop("Unreachable code."); | 1872 __ stop("Unreachable code."); |
| 2175 } | 1873 } |
| 2176 | 1874 |
| 2177 __ bind(&pop_and_call_runtime); | 1875 __ bind(&pop_and_call_runtime); |
| 2178 __ Drop(2); | 1876 __ Drop(2); |
| 2179 __ Branch(&call_runtime); | 1877 __ Branch(&call_runtime); |
| 2180 } | 1878 } |
| 2181 | 1879 |
| 2182 break; | 1880 break; |
| 2183 } | 1881 } |
| 2184 | 1882 |
| 2185 case Token::BIT_OR: | 1883 case Token::BIT_OR: |
| 2186 case Token::BIT_XOR: | 1884 case Token::BIT_XOR: |
| 2187 case Token::BIT_AND: | 1885 case Token::BIT_AND: |
| 2188 case Token::SAR: | 1886 case Token::SAR: |
| 2189 case Token::SHR: | 1887 case Token::SHR: |
| 2190 case Token::SHL: { | 1888 case Token::SHL: { |
| 2191 Label return_heap_number; | 1889 Label return_heap_number; |
| 2192 Register scratch3 = t1; | |
| 2193 // Convert operands to 32-bit integers. Right in a2 and left in a3. The | 1890 // Convert operands to 32-bit integers. Right in a2 and left in a3. The |
| 2194 // registers a0 and a1 (right and left) are preserved for the runtime | 1891 // registers a0 and a1 (right and left) are preserved for the runtime |
| 2195 // call. | 1892 // call. |
| 2196 FloatingPointHelper::LoadNumberAsInt32(masm, | 1893 __ LoadNumberAsInt32( |
| 2197 left, | 1894 left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition); |
| 2198 a3, | 1895 __ LoadNumberAsInt32( |
| 2199 heap_number_map, | 1896 right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition); |
| 2200 scratch1, | |
| 2201 scratch2, | |
| 2202 scratch3, | |
| 2203 f0, | |
| 2204 f2, | |
| 2205 &transition); | |
| 2206 FloatingPointHelper::LoadNumberAsInt32(masm, | |
| 2207 right, | |
| 2208 a2, | |
| 2209 heap_number_map, | |
| 2210 scratch1, | |
| 2211 scratch2, | |
| 2212 scratch3, | |
| 2213 f0, | |
| 2214 f2, | |
| 2215 &transition); | |
| 2216 | 1897 |
| 2217 // The ECMA-262 standard specifies that, for shift operations, only the | 1898 // The ECMA-262 standard specifies that, for shift operations, only the |
| 2218 // 5 least significant bits of the shift value should be used. | 1899 // 5 least significant bits of the shift value should be used. |
| 2219 switch (op_) { | 1900 switch (op_) { |
| 2220 case Token::BIT_OR: | 1901 case Token::BIT_OR: |
| 2221 __ Or(a2, a3, Operand(a2)); | 1902 __ Or(a2, a3, Operand(a2)); |
| 2222 break; | 1903 break; |
| 2223 case Token::BIT_XOR: | 1904 case Token::BIT_XOR: |
| 2224 __ Xor(a2, a3, Operand(a2)); | 1905 __ Xor(a2, a3, Operand(a2)); |
| 2225 break; | 1906 break; |
| (...skipping 4864 matching lines...) |
| 7090 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); | 6771 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); |
| 7091 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); | 6772 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); |
| 7092 __ Addu(t2, t1, t2); | 6773 __ Addu(t2, t1, t2); |
| 7093 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize)); | 6774 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize)); |
| 7094 __ Ret(USE_DELAY_SLOT); | 6775 __ Ret(USE_DELAY_SLOT); |
| 7095 __ mov(v0, a0); | 6776 __ mov(v0, a0); |
| 7096 | 6777 |
| 7097 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. | 6778 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. |
| 7098 __ bind(&double_elements); | 6779 __ bind(&double_elements); |
| 7099 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); | 6780 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); |
| 7100 __ StoreNumberToDoubleElements(a0, a3, | 6781 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements); |
| 7101 // Overwrites all regs after this. | |
| 7102 t1, t2, t3, t5, a2, | |
| 7103 &slow_elements); | |
| 7104 __ Ret(USE_DELAY_SLOT); | 6782 __ Ret(USE_DELAY_SLOT); |
| 7105 __ mov(v0, a0); | 6783 __ mov(v0, a0); |
| 7106 } | 6784 } |
| 7107 | 6785 |
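In the fast (non-double) elements store above, the smi index is already index << 1, so shifting it left by kPointerSizeLog2 - kSmiTagSize = 1 yields the byte offset index * 4 directly. A sketch of that arithmetic, assuming 32-bit pointers (kPointerSizeLog2 == 2) and kSmiTagSize == 1:

```
#include <cstdint>

// A smi index already holds (index << 1); one more left shift gives
// index * 4, the byte offset of a pointer-sized element.
uint32_t FastElementByteOffset(int32_t smi_index) {
  return static_cast<uint32_t>(smi_index) << 1;  // == (smi_index >> 1) * 4
}
```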
| 7108 | 6786 |
| 7109 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 6787 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 7110 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | 6788 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
| 7111 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6789 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 7112 int parameter_count_offset = | 6790 int parameter_count_offset = |
| 7113 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 6791 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| (...skipping 350 matching lines...) |
| 7464 __ bind(&fast_elements_case); | 7142 __ bind(&fast_elements_case); |
| 7465 GenerateCase(masm, FAST_ELEMENTS); | 7143 GenerateCase(masm, FAST_ELEMENTS); |
| 7466 } | 7144 } |
| 7467 | 7145 |
| 7468 | 7146 |
| 7469 #undef __ | 7147 #undef __ |
| 7470 | 7148 |
| 7471 } } // namespace v8::internal | 7149 } } // namespace v8::internal |
| 7472 | 7150 |
| 7473 #endif // V8_TARGET_ARCH_MIPS | 7151 #endif // V8_TARGET_ARCH_MIPS |