OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 627 matching lines...)
638 __ Ret(USE_DELAY_SLOT); | 638 __ Ret(USE_DELAY_SLOT); |
639 __ or_(exponent, exponent, source_); | 639 __ or_(exponent, exponent, source_); |
640 } | 640 } |
641 | 641 |
642 | 642 |
643 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 643 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
644 FloatingPointHelper::Destination destination, | 644 FloatingPointHelper::Destination destination, |
645 Register scratch1, | 645 Register scratch1, |
646 Register scratch2) { | 646 Register scratch2) { |
647 if (CpuFeatures::IsSupported(FPU)) { | 647 if (CpuFeatures::IsSupported(FPU)) { |
648 CpuFeatures::Scope scope(FPU); | 648 CpuFeatureScope scope(masm, FPU); |
649 __ sra(scratch1, a0, kSmiTagSize); | 649 __ sra(scratch1, a0, kSmiTagSize); |
650 __ mtc1(scratch1, f14); | 650 __ mtc1(scratch1, f14); |
651 __ cvt_d_w(f14, f14); | 651 __ cvt_d_w(f14, f14); |
652 __ sra(scratch1, a1, kSmiTagSize); | 652 __ sra(scratch1, a1, kSmiTagSize); |
653 __ mtc1(scratch1, f12); | 653 __ mtc1(scratch1, f12); |
654 __ cvt_d_w(f12, f12); | 654 __ cvt_d_w(f12, f12); |
655 if (destination == kCoreRegisters) { | 655 if (destination == kCoreRegisters) { |
656 __ Move(a2, a3, f14); | 656 __ Move(a2, a3, f14); |
657 __ Move(a0, a1, f12); | 657 __ Move(a0, a1, f12); |
658 } | 658 } |
(...skipping 30 matching lines...)
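[Reviewer note] The mechanical change running through this CL swaps the old CpuFeatures::Scope(FPU) for CpuFeatureScope(masm, FPU): the scope is now tied to a specific assembler instead of process-global state. A minimal C++ sketch of the RAII shape this implies; class and member names here are illustrative, not V8's actual declarations:

  #include <cassert>
  #include <cstdint>

  enum CpuFeature { FPU = 0 };

  class Assembler {
   public:
    bool IsEnabled(CpuFeature f) const { return (enabled_ & (1u << f)) != 0; }
    void Enable(CpuFeature f) { enabled_ |= (1u << f); }
    void Disable(CpuFeature f) { enabled_ &= ~(1u << f); }
   private:
    uint32_t enabled_ = 0;
  };

  // Tying the scope to an assembler lets each assembler track which CPU
  // features the code it emits depends on.
  class CpuFeatureScope {
   public:
    CpuFeatureScope(Assembler* assembler, CpuFeature f)
        : assembler_(assembler), feature_(f) {
      assembler_->Enable(feature_);
    }
    ~CpuFeatureScope() { assembler_->Disable(feature_); }
   private:
    Assembler* assembler_;
    CpuFeature feature_;
  };

  int main() {
    Assembler masm;
    {
      CpuFeatureScope scope(&masm, FPU);
      assert(masm.IsEnabled(FPU));  // FPU instructions may be emitted here.
    }
    assert(!masm.IsEnabled(FPU));  // Outside the scope, emitting FPU code
    return 0;                      // would assert in a real assembler.
  }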
689 Label is_smi, done; | 689 Label is_smi, done; |
690 | 690 |
691 // Smi-check | 691 // Smi-check |
692 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | 692 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
693 // Heap number check | 693 // Heap number check |
694 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 694 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
695 | 695 |
696 // Handle loading a double from a heap number. | 696 // Handle loading a double from a heap number. |
697 if (CpuFeatures::IsSupported(FPU) && | 697 if (CpuFeatures::IsSupported(FPU) && |
698 destination == kFPURegisters) { | 698 destination == kFPURegisters) { |
699 CpuFeatures::Scope scope(FPU); | 699 CpuFeatureScope scope(masm, FPU); |
700 // Load the double from tagged HeapNumber to double register. | 700 // Load the double from tagged HeapNumber to double register. |
701 | 701 |
702 // ARM uses a workaround here because of the unaligned HeapNumber | 702 // ARM uses a workaround here because of the unaligned HeapNumber |
703 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no | 703 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no |
704 // point in generating even more instructions. | 704 // point in generating even more instructions. |
705 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 705 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
706 } else { | 706 } else { |
707 ASSERT(destination == kCoreRegisters); | 707 ASSERT(destination == kCoreRegisters); |
708 // Load the double from heap number to dst1 and dst2 in double format. | 708 // Load the double from heap number to dst1 and dst2 in double format. |
709 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); | 709 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); |
710 __ lw(dst2, FieldMemOperand(object, | 710 __ lw(dst2, FieldMemOperand(object, |
711 HeapNumber::kValueOffset + kPointerSize)); | 711 HeapNumber::kValueOffset + kPointerSize)); |
712 } | 712 } |
713 __ Branch(&done); | 713 __ Branch(&done); |
714 | 714 |
715 // Handle loading a double from a smi. | 715 // Handle loading a double from a smi. |
716 __ bind(&is_smi); | 716 __ bind(&is_smi); |
717 if (CpuFeatures::IsSupported(FPU)) { | 717 if (CpuFeatures::IsSupported(FPU)) { |
718 CpuFeatures::Scope scope(FPU); | 718 CpuFeatureScope scope(masm, FPU); |
719 // Convert smi to double using FPU instructions. | 719 // Convert smi to double using FPU instructions. |
720 __ mtc1(scratch1, dst); | 720 __ mtc1(scratch1, dst); |
721 __ cvt_d_w(dst, dst); | 721 __ cvt_d_w(dst, dst); |
722 if (destination == kCoreRegisters) { | 722 if (destination == kCoreRegisters) { |
723 // Load the converted smi to dst1 and dst2 in double format. | 723 // Load the converted smi to dst1 and dst2 in double format. |
724 __ Move(dst1, dst2, dst); | 724 __ Move(dst1, dst2, dst); |
725 } | 725 } |
726 } else { | 726 } else { |
727 ASSERT(destination == kCoreRegisters); | 727 ASSERT(destination == kCoreRegisters); |
728 // Write smi to dst1 and dst2 in double format. | 728 // Write smi to dst1 and dst2 in double format. |
(...skipping 55 matching lines...)
784 Register dst_exponent, | 784 Register dst_exponent, |
785 Register scratch2, | 785 Register scratch2, |
786 FPURegister single_scratch) { | 786 FPURegister single_scratch) { |
787 ASSERT(!int_scratch.is(scratch2)); | 787 ASSERT(!int_scratch.is(scratch2)); |
788 ASSERT(!int_scratch.is(dst_mantissa)); | 788 ASSERT(!int_scratch.is(dst_mantissa)); |
789 ASSERT(!int_scratch.is(dst_exponent)); | 789 ASSERT(!int_scratch.is(dst_exponent)); |
790 | 790 |
791 Label done; | 791 Label done; |
792 | 792 |
793 if (CpuFeatures::IsSupported(FPU)) { | 793 if (CpuFeatures::IsSupported(FPU)) { |
794 CpuFeatures::Scope scope(FPU); | 794 CpuFeatureScope scope(masm, FPU); |
795 __ mtc1(int_scratch, single_scratch); | 795 __ mtc1(int_scratch, single_scratch); |
796 __ cvt_d_w(double_dst, single_scratch); | 796 __ cvt_d_w(double_dst, single_scratch); |
797 if (destination == kCoreRegisters) { | 797 if (destination == kCoreRegisters) { |
798 __ Move(dst_mantissa, dst_exponent, double_dst); | 798 __ Move(dst_mantissa, dst_exponent, double_dst); |
799 } | 799 } |
800 } else { | 800 } else { |
801 Label fewer_than_20_useful_bits; | 801 Label fewer_than_20_useful_bits; |
802 // Expected output: | 802 // Expected output: |
803 // | dst_exponent | dst_mantissa | | 803 // | dst_exponent | dst_mantissa | |
804 // | s | exp | mantissa | | 804 // | s | exp | mantissa | |
(...skipping 81 matching lines...)
886 __ Branch(&done); | 886 __ Branch(&done); |
887 | 887 |
888 __ bind(&obj_is_not_smi); | 888 __ bind(&obj_is_not_smi); |
889 __ AssertRootValue(heap_number_map, | 889 __ AssertRootValue(heap_number_map, |
890 Heap::kHeapNumberMapRootIndex, | 890 Heap::kHeapNumberMapRootIndex, |
891 "HeapNumberMap register clobbered."); | 891 "HeapNumberMap register clobbered."); |
892 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 892 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
893 | 893 |
894 // Load the number. | 894 // Load the number. |
895 if (CpuFeatures::IsSupported(FPU)) { | 895 if (CpuFeatures::IsSupported(FPU)) { |
896 CpuFeatures::Scope scope(FPU); | 896 CpuFeatureScope scope(masm, FPU); |
897 // Load the double value. | 897 // Load the double value. |
898 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 898 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
899 | 899 |
900 Register except_flag = scratch2; | 900 Register except_flag = scratch2; |
901 __ EmitFPUTruncate(kRoundToZero, | 901 __ EmitFPUTruncate(kRoundToZero, |
902 scratch1, | 902 scratch1, |
903 double_dst, | 903 double_dst, |
904 at, | 904 at, |
905 double_scratch, | 905 double_scratch, |
906 except_flag, | 906 except_flag, |
(...skipping 76 matching lines...)
983 | 983 |
984 __ AssertRootValue(heap_number_map, | 984 __ AssertRootValue(heap_number_map, |
985 Heap::kHeapNumberMapRootIndex, | 985 Heap::kHeapNumberMapRootIndex, |
986 "HeapNumberMap register clobbered."); | 986 "HeapNumberMap register clobbered."); |
987 | 987 |
988 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | 988 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
989 | 989 |
990 // Object is a heap number. | 990 // Object is a heap number. |
991 // Convert the floating point value to a 32-bit integer. | 991 // Convert the floating point value to a 32-bit integer. |
992 if (CpuFeatures::IsSupported(FPU)) { | 992 if (CpuFeatures::IsSupported(FPU)) { |
993 CpuFeatures::Scope scope(FPU); | 993 CpuFeatureScope scope(masm, FPU); |
994 // Load the double value. | 994 // Load the double value. |
995 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); | 995 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); |
996 | 996 |
997 Register except_flag = scratch2; | 997 Register except_flag = scratch2; |
998 __ EmitFPUTruncate(kRoundToZero, | 998 __ EmitFPUTruncate(kRoundToZero, |
999 dst, | 999 dst, |
1000 double_scratch0, | 1000 double_scratch0, |
1001 scratch1, | 1001 scratch1, |
1002 double_scratch1, | 1002 double_scratch1, |
1003 except_flag, | 1003 except_flag, |
(...skipping 117 matching lines...)
1121 // a3: Right value (sign, exponent, top of mantissa). | 1121 // a3: Right value (sign, exponent, top of mantissa). |
1122 | 1122 |
1123 // Assert that heap_number_result is saved. | 1123 // Assert that heap_number_result is saved. |
1124 // We currently always use s0 to pass it. | 1124 // We currently always use s0 to pass it. |
1125 ASSERT(heap_number_result.is(s0)); | 1125 ASSERT(heap_number_result.is(s0)); |
1126 | 1126 |
1127 // Push the current return address before the C call. | 1127 // Push the current return address before the C call. |
1128 __ push(ra); | 1128 __ push(ra); |
1129 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | 1129 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
1130 if (!IsMipsSoftFloatABI) { | 1130 if (!IsMipsSoftFloatABI) { |
1131 CpuFeatures::Scope scope(FPU); | 1131 CpuFeatureScope scope(masm, FPU); |
1132 // We are not using MIPS FPU instructions, and the parameters for the | 1132 // We are not using MIPS FPU instructions, and the parameters for the |
1133 // runtime call are prepared in the a0-a3 registers, but the function we | 1133 // runtime call are prepared in the a0-a3 registers, but the function we |
1134 // are calling is compiled with the hard-float flag and expects the | 1134 // are calling is compiled with the hard-float flag and expects the |
1135 // hard-float ABI (parameters in the f12/f14 registers). We need to copy | 1135 // hard-float ABI (parameters in the f12/f14 registers). We need to copy |
1136 // the parameters from a0-a3 to the f12/f14 register pairs. | 1136 // the parameters from a0-a3 to the f12/f14 register pairs. |
1137 __ Move(f12, a0, a1); | 1137 __ Move(f12, a0, a1); |
1138 __ Move(f14, a2, a3); | 1138 __ Move(f14, a2, a3); |
1139 } | 1139 } |
1140 { | 1140 { |
1141 AllowExternalCallThatCantCauseGC scope(masm); | 1141 AllowExternalCallThatCantCauseGC scope(masm); |
1142 __ CallCFunction( | 1142 __ CallCFunction( |
1143 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | 1143 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
1144 } | 1144 } |
1145 // Store answer in the overwritable heap number. | 1145 // Store answer in the overwritable heap number. |
1146 if (!IsMipsSoftFloatABI) { | 1146 if (!IsMipsSoftFloatABI) { |
1147 CpuFeatures::Scope scope(FPU); | 1147 CpuFeatureScope scope(masm, FPU); |
1148 // Double returned in register f0. | 1148 // Double returned in register f0. |
1149 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 1149 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
1150 } else { | 1150 } else { |
1151 // Double returned in registers v0 and v1. | 1151 // Double returned in registers v0 and v1. |
1152 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); | 1152 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); |
1153 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); | 1153 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); |
1154 } | 1154 } |
1155 // Place heap_number_result in v0 and return to the pushed return address. | 1155 // Place heap_number_result in v0 and return to the pushed return address. |
1156 __ pop(ra); | 1156 __ pop(ra); |
1157 __ Ret(USE_DELAY_SLOT); | 1157 __ Ret(USE_DELAY_SLOT); |
(...skipping 203 matching lines...)
1361 __ mov(v0, lhs); | 1361 __ mov(v0, lhs); |
1362 } else { | 1362 } else { |
1363 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1363 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1364 // the runtime. | 1364 // the runtime. |
1365 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 1365 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1366 } | 1366 } |
1367 | 1367 |
1368 // Rhs is a smi, lhs is a number. | 1368 // Rhs is a smi, lhs is a number. |
1369 // Convert smi rhs to double. | 1369 // Convert smi rhs to double. |
1370 if (CpuFeatures::IsSupported(FPU)) { | 1370 if (CpuFeatures::IsSupported(FPU)) { |
1371 CpuFeatures::Scope scope(FPU); | 1371 CpuFeatureScope scope(masm, FPU); |
1372 __ sra(at, rhs, kSmiTagSize); | 1372 __ sra(at, rhs, kSmiTagSize); |
1373 __ mtc1(at, f14); | 1373 __ mtc1(at, f14); |
1374 __ cvt_d_w(f14, f14); | 1374 __ cvt_d_w(f14, f14); |
1375 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1375 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1376 } else { | 1376 } else { |
1377 // Load lhs to a double in a2, a3. | 1377 // Load lhs to a double in a2, a3. |
1378 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); | 1378 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); |
1379 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1379 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1380 | 1380 |
1381 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. | 1381 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. |
(...skipping 18 matching lines...)
1400 __ li(v0, Operand(1)); | 1400 __ li(v0, Operand(1)); |
1401 } else { | 1401 } else { |
1402 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1402 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1403 // the runtime. | 1403 // the runtime. |
1404 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 1404 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
1405 } | 1405 } |
1406 | 1406 |
1407 // Lhs is a smi, rhs is a number. | 1407 // Lhs is a smi, rhs is a number. |
1408 // Convert smi lhs to double. | 1408 // Convert smi lhs to double. |
1409 if (CpuFeatures::IsSupported(FPU)) { | 1409 if (CpuFeatures::IsSupported(FPU)) { |
1410 CpuFeatures::Scope scope(FPU); | 1410 CpuFeatureScope scope(masm, FPU); |
1411 __ sra(at, lhs, kSmiTagSize); | 1411 __ sra(at, lhs, kSmiTagSize); |
1412 __ mtc1(at, f12); | 1412 __ mtc1(at, f12); |
1413 __ cvt_d_w(f12, f12); | 1413 __ cvt_d_w(f12, f12); |
1414 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1414 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1415 } else { | 1415 } else { |
1416 // Convert lhs to a double format. t5 is scratch. | 1416 // Convert lhs to a double format. t5 is scratch. |
1417 __ mov(t6, lhs); | 1417 __ mov(t6, lhs); |
1418 ConvertToDoubleStub stub2(a3, a2, t6, t5); | 1418 ConvertToDoubleStub stub2(a3, a2, t6, t5); |
1419 __ push(ra); | 1419 __ push(ra); |
1420 __ Call(stub2.GetCode(masm->isolate())); | 1420 __ Call(stub2.GetCode(masm->isolate())); |
1421 __ pop(ra); | 1421 __ pop(ra); |
1422 // Load rhs to a double in a1, a0. | 1422 // Load rhs to a double in a1, a0. |
1423 if (rhs.is(a0)) { | 1423 if (rhs.is(a0)) { |
1424 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | 1424 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
1425 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1425 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1426 } else { | 1426 } else { |
1427 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1427 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1428 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | 1428 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
1429 } | 1429 } |
1430 } | 1430 } |
1431 // Fall through to both_loaded_as_doubles. | 1431 // Fall through to both_loaded_as_doubles. |
1432 } | 1432 } |
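[Reviewer note] The sra(scratch, <reg>, kSmiTagSize) / mtc1 / cvt_d_w sequences in this function untag a smi and convert it to a double. A small host-side sketch of the 32-bit smi encoding this relies on (value << 1 with a zero tag bit); the helper names are illustrative:

  #include <cassert>
  #include <cstdint>

  const int kSmiTagSize = 1;

  int32_t SmiTag(int32_t value) {  // shift via uint32_t to avoid signed-shift UB
    return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
  }
  int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }  // the sra above
  bool IsSmi(int32_t word) { return (word & 1) == 0; }

  int main() {
    int32_t smi = SmiTag(-42);
    assert(IsSmi(smi));
    // The stub then does mtc1 + cvt.d.w on the untagged value:
    double d = static_cast<double>(SmiUntag(smi));
    assert(d == -42.0);
    return 0;
  }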
1433 | 1433 |
1434 | 1434 |
1435 void EmitNanCheck(MacroAssembler* masm, Condition cc) { | 1435 void EmitNanCheck(MacroAssembler* masm, Condition cc) { |
1436 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1436 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1437 if (CpuFeatures::IsSupported(FPU)) { | 1437 if (CpuFeatures::IsSupported(FPU)) { |
1438 CpuFeatures::Scope scope(FPU); | 1438 CpuFeatureScope scope(masm, FPU); |
1439 // Lhs and rhs are already loaded to f12 and f14 register pairs. | 1439 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
1440 __ Move(t0, t1, f14); | 1440 __ Move(t0, t1, f14); |
1441 __ Move(t2, t3, f12); | 1441 __ Move(t2, t3, f12); |
1442 } else { | 1442 } else { |
1443 // Lhs and rhs are already loaded to GP registers. | 1443 // Lhs and rhs are already loaded to GP registers. |
1444 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | 1444 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
1445 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | 1445 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
1446 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | 1446 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
1447 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | 1447 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
1448 } | 1448 } |
(...skipping 46 matching lines...)
1495 // Call C routine that may not cause GC or other trouble. | 1495 // Call C routine that may not cause GC or other trouble. |
1496 // We use a call and return manually because we need the argument slots | 1496 // We use a call and return manually because we need the argument slots |
1497 // to be freed. | 1497 // to be freed. |
1498 | 1498 |
1499 Label return_result_not_equal, return_result_equal; | 1499 Label return_result_not_equal, return_result_equal; |
1500 if (cc == eq) { | 1500 if (cc == eq) { |
1501 // Doubles are not equal unless they have the same bit pattern. | 1501 // Doubles are not equal unless they have the same bit pattern. |
1502 // Exception: 0 and -0. | 1502 // Exception: 0 and -0. |
1503 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1503 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1504 if (CpuFeatures::IsSupported(FPU)) { | 1504 if (CpuFeatures::IsSupported(FPU)) { |
1505 CpuFeatures::Scope scope(FPU); | 1505 CpuFeatureScope scope(masm, FPU); |
1506 // Lhs and rhs are already loaded to f12 and f14 register pairs. | 1506 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
1507 __ Move(t0, t1, f14); | 1507 __ Move(t0, t1, f14); |
1508 __ Move(t2, t3, f12); | 1508 __ Move(t2, t3, f12); |
1509 } else { | 1509 } else { |
1510 // Lhs and rhs are already loaded to GP registers. | 1510 // Lhs and rhs are already loaded to GP registers. |
1511 __ mov(t0, a0); // a0 has LS 32 bits of rhs. | 1511 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
1512 __ mov(t1, a1); // a1 has MS 32 bits of rhs. | 1512 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
1513 __ mov(t2, a2); // a2 has LS 32 bits of lhs. | 1513 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
1514 __ mov(t3, a3); // a3 has MS 32 bits of lhs. | 1514 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
1515 } | 1515 } |
(...skipping 35 matching lines...)
1551 __ Move(f12, a0, a1); | 1551 __ Move(f12, a0, a1); |
1552 __ Move(f14, a2, a3); | 1552 __ Move(f14, a2, a3); |
1553 } | 1553 } |
1554 | 1554 |
1555 AllowExternalCallThatCantCauseGC scope(masm); | 1555 AllowExternalCallThatCantCauseGC scope(masm); |
1556 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), | 1556 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), |
1557 0, 2); | 1557 0, 2); |
1558 __ pop(ra); // Because this function returns int, result is in v0. | 1558 __ pop(ra); // Because this function returns int, result is in v0. |
1559 __ Ret(); | 1559 __ Ret(); |
1560 } else { | 1560 } else { |
1561 CpuFeatures::Scope scope(FPU); | 1561 CpuFeatureScope scope(masm, FPU); |
1562 Label equal, less_than; | 1562 Label equal, less_than; |
1563 __ BranchF(&equal, NULL, eq, f12, f14); | 1563 __ BranchF(&equal, NULL, eq, f12, f14); |
1564 __ BranchF(&less_than, NULL, lt, f12, f14); | 1564 __ BranchF(&less_than, NULL, lt, f12, f14); |
1565 | 1565 |
1566 // Not equal, not less, not NaN, must be greater. | 1566 // Not equal, not less, not NaN, must be greater. |
1567 | 1567 |
1568 __ li(v0, Operand(GREATER)); | 1568 __ li(v0, Operand(GREATER)); |
1569 __ Ret(); | 1569 __ Ret(); |
1570 | 1570 |
1571 __ bind(&equal); | 1571 __ bind(&equal); |
(...skipping 54 matching lines...)
1626 Label* slow) { | 1626 Label* slow) { |
1627 __ GetObjectType(lhs, a3, a2); | 1627 __ GetObjectType(lhs, a3, a2); |
1628 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 1628 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
1629 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 1629 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
1630 // If first was a heap number & second wasn't, go to slow case. | 1630 // If first was a heap number & second wasn't, go to slow case. |
1631 __ Branch(slow, ne, a3, Operand(a2)); | 1631 __ Branch(slow, ne, a3, Operand(a2)); |
1632 | 1632 |
1633 // Both are heap numbers. Load them up then jump to the code we have | 1633 // Both are heap numbers. Load them up then jump to the code we have |
1634 // for that. | 1634 // for that. |
1635 if (CpuFeatures::IsSupported(FPU)) { | 1635 if (CpuFeatures::IsSupported(FPU)) { |
1636 CpuFeatures::Scope scope(FPU); | 1636 CpuFeatureScope scope(masm, FPU); |
1637 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1637 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1638 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1638 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1639 } else { | 1639 } else { |
1640 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1640 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1641 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); | 1641 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); |
1642 if (rhs.is(a0)) { | 1642 if (rhs.is(a0)) { |
1643 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); | 1643 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
1644 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1644 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1645 } else { | 1645 } else { |
1646 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1646 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
(...skipping 74 matching lines...)
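[Reviewer note] The soft-float paths load the 64-bit HeapNumber payload as two 32-bit words (kValueOffset and kValueOffset + 4, a.k.a. kMantissaOffset and kExponentOffset). A host-side sketch of how that word pair lines up with IEEE-754, assuming a little-endian target such as these MIPS stubs (run it on a little-endian host):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  struct DoubleWords {
    uint32_t mantissa;           // low word: low 32 bits of the significand
    uint32_t exponent_and_sign;  // high word: sign, exponent, top of mantissa
  };

  DoubleWords Split(double d) {
    DoubleWords w;
    std::memcpy(&w, &d, sizeof w);  // the two lw loads in the soft-float path
    return w;
  }

  int main() {
    DoubleWords w = Split(1.0);
    // 1.0 encodes as sign 0, biased exponent 0x3FF, zero mantissa.
    assert(w.exponent_and_sign == 0x3FF00000u);
    assert(w.mantissa == 0u);
    return 0;
  }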
1721 // Calculate the entry in the number string cache. The hash value in the | 1721 // Calculate the entry in the number string cache. The hash value in the |
1722 // number string cache for smis is just the smi value, and the hash for | 1722 // number string cache for smis is just the smi value, and the hash for |
1723 // doubles is the xor of the upper and lower words. See | 1723 // doubles is the xor of the upper and lower words. See |
1724 // Heap::GetNumberStringCache. | 1724 // Heap::GetNumberStringCache. |
1725 Isolate* isolate = masm->isolate(); | 1725 Isolate* isolate = masm->isolate(); |
1726 Label is_smi; | 1726 Label is_smi; |
1727 Label load_result_from_cache; | 1727 Label load_result_from_cache; |
1728 if (!object_is_smi) { | 1728 if (!object_is_smi) { |
1729 __ JumpIfSmi(object, &is_smi); | 1729 __ JumpIfSmi(object, &is_smi); |
1730 if (CpuFeatures::IsSupported(FPU)) { | 1730 if (CpuFeatures::IsSupported(FPU)) { |
1731 CpuFeatures::Scope scope(FPU); | 1731 CpuFeatureScope scope(masm, FPU); |
1732 __ CheckMap(object, | 1732 __ CheckMap(object, |
1733 scratch1, | 1733 scratch1, |
1734 Heap::kHeapNumberMapRootIndex, | 1734 Heap::kHeapNumberMapRootIndex, |
1735 not_found, | 1735 not_found, |
1736 DONT_DO_SMI_CHECK); | 1736 DONT_DO_SMI_CHECK); |
1737 | 1737 |
1738 STATIC_ASSERT(8 == kDoubleSize); | 1738 STATIC_ASSERT(8 == kDoubleSize); |
1739 __ Addu(scratch1, | 1739 __ Addu(scratch1, |
1740 object, | 1740 object, |
1741 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1741 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
(...skipping 132 matching lines...)
1874 EmitSmiNonsmiComparison(masm, lhs, rhs, | 1874 EmitSmiNonsmiComparison(masm, lhs, rhs, |
1875 &both_loaded_as_doubles, &slow, strict()); | 1875 &both_loaded_as_doubles, &slow, strict()); |
1876 | 1876 |
1877 __ bind(&both_loaded_as_doubles); | 1877 __ bind(&both_loaded_as_doubles); |
1878 // f12, f14 are the double representations of the left hand side | 1878 // f12, f14 are the double representations of the left hand side |
1879 // and the right hand side if we have FPU. Otherwise a2, a3 represent | 1879 // and the right hand side if we have FPU. Otherwise a2, a3 represent |
1880 // left hand side and a0, a1 represent right hand side. | 1880 // left hand side and a0, a1 represent right hand side. |
1881 | 1881 |
1882 Isolate* isolate = masm->isolate(); | 1882 Isolate* isolate = masm->isolate(); |
1883 if (CpuFeatures::IsSupported(FPU)) { | 1883 if (CpuFeatures::IsSupported(FPU)) { |
1884 CpuFeatures::Scope scope(FPU); | 1884 CpuFeatureScope scope(masm, FPU); |
1885 Label nan; | 1885 Label nan; |
1886 __ li(t0, Operand(LESS)); | 1886 __ li(t0, Operand(LESS)); |
1887 __ li(t1, Operand(GREATER)); | 1887 __ li(t1, Operand(GREATER)); |
1888 __ li(t2, Operand(EQUAL)); | 1888 __ li(t2, Operand(EQUAL)); |
1889 | 1889 |
1890 // Check if either rhs or lhs is NaN. | 1890 // Check if either rhs or lhs is NaN. |
1891 __ BranchF(NULL, &nan, eq, f12, f14); | 1891 __ BranchF(NULL, &nan, eq, f12, f14); |
1892 | 1892 |
1893 // Check if LESS condition is satisfied. If true, move conditionally | 1893 // Check if LESS condition is satisfied. If true, move conditionally |
1894 // result to v0. | 1894 // result to v0. |
(...skipping 111 matching lines...)
2006 | 2006 |
2007 __ bind(&miss); | 2007 __ bind(&miss); |
2008 GenerateMiss(masm); | 2008 GenerateMiss(masm); |
2009 } | 2009 } |
2010 | 2010 |
2011 | 2011 |
2012 // The stub expects its argument in the tos_ register and returns its result in | 2012 // The stub expects its argument in the tos_ register and returns its result in |
2013 // it, too: zero for false, and a non-zero value for true. | 2013 // it, too: zero for false, and a non-zero value for true. |
2014 void ToBooleanStub::Generate(MacroAssembler* masm) { | 2014 void ToBooleanStub::Generate(MacroAssembler* masm) { |
2015 // This stub uses FPU instructions. | 2015 // This stub uses FPU instructions. |
2016 CpuFeatures::Scope scope(FPU); | 2016 CpuFeatureScope scope(masm, FPU); |
2017 | 2017 |
2018 Label patch; | 2018 Label patch; |
2019 const Register map = t5.is(tos_) ? t3 : t5; | 2019 const Register map = t5.is(tos_) ? t3 : t5; |
2020 | 2020 |
2021 // undefined -> false. | 2021 // undefined -> false. |
2022 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); | 2022 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); |
2023 | 2023 |
2024 // Boolean -> its value. | 2024 // Boolean -> its value. |
2025 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); | 2025 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
2026 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); | 2026 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
(...skipping 94 matching lines...)
2121 1); | 2121 1); |
2122 } | 2122 } |
2123 | 2123 |
2124 | 2124 |
2125 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 2125 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
2126 // We don't allow a GC during a store buffer overflow so there is no need to | 2126 // We don't allow a GC during a store buffer overflow so there is no need to |
2127 // store the registers in any particular way, but we do have to store and | 2127 // store the registers in any particular way, but we do have to store and |
2128 // restore them. | 2128 // restore them. |
2129 __ MultiPush(kJSCallerSaved | ra.bit()); | 2129 __ MultiPush(kJSCallerSaved | ra.bit()); |
2130 if (save_doubles_ == kSaveFPRegs) { | 2130 if (save_doubles_ == kSaveFPRegs) { |
2131 CpuFeatures::Scope scope(FPU); | 2131 CpuFeatureScope scope(masm, FPU); |
2132 __ MultiPushFPU(kCallerSavedFPU); | 2132 __ MultiPushFPU(kCallerSavedFPU); |
2133 } | 2133 } |
2134 const int argument_count = 1; | 2134 const int argument_count = 1; |
2135 const int fp_argument_count = 0; | 2135 const int fp_argument_count = 0; |
2136 const Register scratch = a1; | 2136 const Register scratch = a1; |
2137 | 2137 |
2138 AllowExternalCallThatCantCauseGC scope(masm); | 2138 AllowExternalCallThatCantCauseGC scope(masm); |
2139 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 2139 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
2140 __ li(a0, Operand(ExternalReference::isolate_address())); | 2140 __ li(a0, Operand(ExternalReference::isolate_address())); |
2141 __ CallCFunction( | 2141 __ CallCFunction( |
2142 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 2142 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
2143 argument_count); | 2143 argument_count); |
2144 if (save_doubles_ == kSaveFPRegs) { | 2144 if (save_doubles_ == kSaveFPRegs) { |
2145 CpuFeatures::Scope scope(FPU); | 2145 CpuFeatureScope scope(masm, FPU); |
2146 __ MultiPopFPU(kCallerSavedFPU); | 2146 __ MultiPopFPU(kCallerSavedFPU); |
2147 } | 2147 } |
2148 | 2148 |
2149 __ MultiPop(kJSCallerSaved | ra.bit()); | 2149 __ MultiPop(kJSCallerSaved | ra.bit()); |
2150 __ Ret(); | 2150 __ Ret(); |
2151 } | 2151 } |
2152 | 2152 |
2153 | 2153 |
2154 void UnaryOpStub::PrintName(StringStream* stream) { | 2154 void UnaryOpStub::PrintName(StringStream* stream) { |
2155 const char* op_name = Token::Name(op_); | 2155 const char* op_name = Token::Name(op_); |
(...skipping 212 matching lines...)
2368 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); | 2368 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); |
2369 // Negate the result. | 2369 // Negate the result. |
2370 __ Xor(a1, a1, -1); | 2370 __ Xor(a1, a1, -1); |
2371 | 2371 |
2372 __ bind(&heapnumber_allocated); | 2372 __ bind(&heapnumber_allocated); |
2373 __ mov(v0, a2); // Move newly allocated heap number to v0. | 2373 __ mov(v0, a2); // Move newly allocated heap number to v0. |
2374 } | 2374 } |
2375 | 2375 |
2376 if (CpuFeatures::IsSupported(FPU)) { | 2376 if (CpuFeatures::IsSupported(FPU)) { |
2377 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. | 2377 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. |
2378 CpuFeatures::Scope scope(FPU); | 2378 CpuFeatureScope scope(masm, FPU); |
2379 __ mtc1(a1, f0); | 2379 __ mtc1(a1, f0); |
2380 __ cvt_d_w(f0, f0); | 2380 __ cvt_d_w(f0, f0); |
2381 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 2381 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
2382 __ Ret(); | 2382 __ Ret(); |
2383 } else { | 2383 } else { |
2384 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 2384 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not |
2385 // have to set up a frame. | 2385 // have to set up a frame. |
2386 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); | 2386 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); |
2387 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 2387 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
2388 } | 2388 } |
(...skipping 326 matching lines...)
2715 masm, destination, left, f12, a0, a1, heap_number_map, | 2715 masm, destination, left, f12, a0, a1, heap_number_map, |
2716 scratch1, scratch2, fail); | 2716 scratch1, scratch2, fail); |
2717 } | 2717 } |
2718 } | 2718 } |
2719 | 2719 |
2720 // Calculate the result. | 2720 // Calculate the result. |
2721 if (destination == FloatingPointHelper::kFPURegisters) { | 2721 if (destination == FloatingPointHelper::kFPURegisters) { |
2722 // Using FPU registers: | 2722 // Using FPU registers: |
2723 // f12: Left value. | 2723 // f12: Left value. |
2724 // f14: Right value. | 2724 // f14: Right value. |
2725 CpuFeatures::Scope scope(FPU); | 2725 CpuFeatureScope scope(masm, FPU); |
2726 switch (op) { | 2726 switch (op) { |
2727 case Token::ADD: | 2727 case Token::ADD: |
2728 __ add_d(f10, f12, f14); | 2728 __ add_d(f10, f12, f14); |
2729 break; | 2729 break; |
2730 case Token::SUB: | 2730 case Token::SUB: |
2731 __ sub_d(f10, f12, f14); | 2731 __ sub_d(f10, f12, f14); |
2732 break; | 2732 break; |
2733 case Token::MUL: | 2733 case Token::MUL: |
2734 __ mul_d(f10, f12, f14); | 2734 __ mul_d(f10, f12, f14); |
2735 break; | 2735 break; |
(...skipping 111 matching lines...)
2847 // a2: Answer as signed int32. | 2847 // a2: Answer as signed int32. |
2848 // t1: Heap number to write answer into. | 2848 // t1: Heap number to write answer into. |
2849 | 2849 |
2850 // Nothing can go wrong now, so move the heap number to v0, which is the | 2850 // Nothing can go wrong now, so move the heap number to v0, which is the |
2851 // result. | 2851 // result. |
2852 __ mov(v0, t1); | 2852 __ mov(v0, t1); |
2853 | 2853 |
2854 if (CpuFeatures::IsSupported(FPU)) { | 2854 if (CpuFeatures::IsSupported(FPU)) { |
2855 // Convert the int32 in a2 to the heap number in a0. As | 2855 // Convert the int32 in a2 to the heap number in a0. As |
2856 // mentioned above, SHR must always produce a positive result. | 2856 // mentioned above, SHR must always produce a positive result. |
2857 CpuFeatures::Scope scope(FPU); | 2857 CpuFeatureScope scope(masm, FPU); |
2858 __ mtc1(a2, f0); | 2858 __ mtc1(a2, f0); |
2859 if (op == Token::SHR) { | 2859 if (op == Token::SHR) { |
2860 __ Cvt_d_uw(f0, f0, f22); | 2860 __ Cvt_d_uw(f0, f0, f22); |
2861 } else { | 2861 } else { |
2862 __ cvt_d_w(f0, f0); | 2862 __ cvt_d_w(f0, f0); |
2863 } | 2863 } |
2864 // ARM uses a workaround here because of the unaligned HeapNumber | 2864 // ARM uses a workaround here because of the unaligned HeapNumber |
2865 // kValueOffset. On MIPS this workaround is built into sdc1 so | 2865 // kValueOffset. On MIPS this workaround is built into sdc1 so |
2866 // there's no point in generating even more instructions. | 2866 // there's no point in generating even more instructions. |
2867 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 2867 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
(...skipping 174 matching lines...)
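[Reviewer note] The Cvt_d_uw above is load-bearing: a Token::SHR result is a uint32 and must not be sign-extended the way cvt.d.w would. A tiny sketch of the difference:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t shr_result = 0xFFFFFFFFu;  // e.g. (-1 >>> 0) in JS
    // Signed conversion (what cvt.d.w does) reads the bits as -1:
    double as_signed = static_cast<double>(static_cast<int32_t>(shr_result));
    // Unsigned conversion yields the JS-visible value, 4294967295:
    double as_unsigned = static_cast<double>(shr_result);
    assert(as_signed == -1.0);
    assert(as_unsigned == 4294967295.0);
    return 0;
  }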
3042 f16, | 3042 f16, |
3043 t0, | 3043 t0, |
3044 t1, | 3044 t1, |
3045 heap_number_map, | 3045 heap_number_map, |
3046 scratch1, | 3046 scratch1, |
3047 scratch2, | 3047 scratch2, |
3048 f2, | 3048 f2, |
3049 &transition); | 3049 &transition); |
3050 | 3050 |
3051 if (destination == FloatingPointHelper::kFPURegisters) { | 3051 if (destination == FloatingPointHelper::kFPURegisters) { |
3052 CpuFeatures::Scope scope(FPU); | 3052 CpuFeatureScope scope(masm, FPU); |
3053 Label return_heap_number; | 3053 Label return_heap_number; |
3054 switch (op_) { | 3054 switch (op_) { |
3055 case Token::ADD: | 3055 case Token::ADD: |
3056 __ add_d(f10, f12, f14); | 3056 __ add_d(f10, f12, f14); |
3057 break; | 3057 break; |
3058 case Token::SUB: | 3058 case Token::SUB: |
3059 __ sub_d(f10, f12, f14); | 3059 __ sub_d(f10, f12, f14); |
3060 break; | 3060 break; |
3061 case Token::MUL: | 3061 case Token::MUL: |
3062 __ mul_d(f10, f12, f14); | 3062 __ mul_d(f10, f12, f14); |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3256 heap_number_result = t1; | 3256 heap_number_result = t1; |
3257 BinaryOpStub_GenerateHeapResultAllocation(masm, | 3257 BinaryOpStub_GenerateHeapResultAllocation(masm, |
3258 heap_number_result, | 3258 heap_number_result, |
3259 heap_number_map, | 3259 heap_number_map, |
3260 scratch1, | 3260 scratch1, |
3261 scratch2, | 3261 scratch2, |
3262 &call_runtime, | 3262 &call_runtime, |
3263 mode_); | 3263 mode_); |
3264 | 3264 |
3265 if (CpuFeatures::IsSupported(FPU)) { | 3265 if (CpuFeatures::IsSupported(FPU)) { |
3266 CpuFeatures::Scope scope(FPU); | 3266 CpuFeatureScope scope(masm, FPU); |
3267 | 3267 |
3268 if (op_ != Token::SHR) { | 3268 if (op_ != Token::SHR) { |
3269 // Convert the result to a floating point value. | 3269 // Convert the result to a floating point value. |
3270 __ mtc1(a2, double_scratch); | 3270 __ mtc1(a2, double_scratch); |
3271 __ cvt_d_w(double_scratch, double_scratch); | 3271 __ cvt_d_w(double_scratch, double_scratch); |
3272 } else { | 3272 } else { |
3273 // The result must be interpreted as an unsigned 32-bit integer. | 3273 // The result must be interpreted as an unsigned 32-bit integer. |
3274 __ mtc1(a2, double_scratch); | 3274 __ mtc1(a2, double_scratch); |
3275 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); | 3275 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); |
3276 } | 3276 } |
(...skipping 183 matching lines...)
3460 Label input_not_smi; | 3460 Label input_not_smi; |
3461 Label loaded; | 3461 Label loaded; |
3462 Label calculate; | 3462 Label calculate; |
3463 Label invalid_cache; | 3463 Label invalid_cache; |
3464 const Register scratch0 = t5; | 3464 const Register scratch0 = t5; |
3465 const Register scratch1 = t3; | 3465 const Register scratch1 = t3; |
3466 const Register cache_entry = a0; | 3466 const Register cache_entry = a0; |
3467 const bool tagged = (argument_type_ == TAGGED); | 3467 const bool tagged = (argument_type_ == TAGGED); |
3468 | 3468 |
3469 if (CpuFeatures::IsSupported(FPU)) { | 3469 if (CpuFeatures::IsSupported(FPU)) { |
3470 CpuFeatures::Scope scope(FPU); | 3470 CpuFeatureScope scope(masm, FPU); |
3471 | 3471 |
3472 if (tagged) { | 3472 if (tagged) { |
3473 // Argument is a number and is on stack and in a0. | 3473 // Argument is a number and is on stack and in a0. |
3474 // Load argument and check if it is a smi. | 3474 // Load argument and check if it is a smi. |
3475 __ JumpIfNotSmi(a0, &input_not_smi); | 3475 __ JumpIfNotSmi(a0, &input_not_smi); |
3476 | 3476 |
3477 // Input is a smi. Convert to double and load the low and high words | 3477 // Input is a smi. Convert to double and load the low and high words |
3478 // of the double into a2, a3. | 3478 // of the double into a2, a3. |
3479 __ sra(t0, a0, kSmiTagSize); | 3479 __ sra(t0, a0, kSmiTagSize); |
3480 __ mtc1(t0, f4); | 3480 __ mtc1(t0, f4); |
(...skipping 89 matching lines...)
3570 __ IncrementCounter( | 3570 __ IncrementCounter( |
3571 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | 3571 counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
3572 if (tagged) { | 3572 if (tagged) { |
3573 __ bind(&invalid_cache); | 3573 __ bind(&invalid_cache); |
3574 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), | 3574 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), |
3575 masm->isolate()), | 3575 masm->isolate()), |
3576 1, | 3576 1, |
3577 1); | 3577 1); |
3578 } else { | 3578 } else { |
3579 ASSERT(CpuFeatures::IsSupported(FPU)); | 3579 ASSERT(CpuFeatures::IsSupported(FPU)); |
3580 CpuFeatures::Scope scope(FPU); | 3580 CpuFeatureScope scope(masm, FPU); |
3581 | 3581 |
3582 Label no_update; | 3582 Label no_update; |
3583 Label skip_cache; | 3583 Label skip_cache; |
3584 | 3584 |
3585 // Call C function to calculate the result and update the cache. | 3585 // Call C function to calculate the result and update the cache. |
3586 // a0: precalculated cache entry address. | 3586 // a0: precalculated cache entry address. |
3587 // a2 and a3: parts of the double value. | 3587 // a2 and a3: parts of the double value. |
3588 // Store a0, a2 and a3 on stack for later before calling C function. | 3588 // Store a0, a2 and a3 on stack for later before calling C function. |
3589 __ Push(a3, a2, cache_entry); | 3589 __ Push(a3, a2, cache_entry); |
3590 GenerateCallCFunction(masm, scratch0); | 3590 GenerateCallCFunction(masm, scratch0); |
(...skipping 107 matching lines...)
3698 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 3698 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
3699 } | 3699 } |
3700 | 3700 |
3701 | 3701 |
3702 void InterruptStub::Generate(MacroAssembler* masm) { | 3702 void InterruptStub::Generate(MacroAssembler* masm) { |
3703 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); | 3703 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
3704 } | 3704 } |
3705 | 3705 |
3706 | 3706 |
3707 void MathPowStub::Generate(MacroAssembler* masm) { | 3707 void MathPowStub::Generate(MacroAssembler* masm) { |
3708 CpuFeatures::Scope fpu_scope(FPU); | 3708 CpuFeatureScope fpu_scope(masm, FPU); |
3709 const Register base = a1; | 3709 const Register base = a1; |
3710 const Register exponent = a2; | 3710 const Register exponent = a2; |
3711 const Register heapnumbermap = t1; | 3711 const Register heapnumbermap = t1; |
3712 const Register heapnumber = v0; | 3712 const Register heapnumber = v0; |
3713 const DoubleRegister double_base = f2; | 3713 const DoubleRegister double_base = f2; |
3714 const DoubleRegister double_exponent = f4; | 3714 const DoubleRegister double_exponent = f4; |
3715 const DoubleRegister double_result = f0; | 3715 const DoubleRegister double_result = f0; |
3716 const DoubleRegister double_scratch = f6; | 3716 const DoubleRegister double_scratch = f6; |
3717 const FPURegister single_scratch = f8; | 3717 const FPURegister single_scratch = f8; |
3718 const Register scratch = t5; | 3718 const Register scratch = t5; |
(...skipping 222 matching lines...)
3941 | 3941 |
3942 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 3942 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
3943 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) | 3943 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) |
3944 ? kSaveFPRegs | 3944 ? kSaveFPRegs |
3945 : kDontSaveFPRegs; | 3945 : kDontSaveFPRegs; |
3946 CEntryStub save_doubles(1, mode); | 3946 CEntryStub save_doubles(1, mode); |
3947 StoreBufferOverflowStub stub(mode); | 3947 StoreBufferOverflowStub stub(mode); |
3948 // These stubs might already be in the snapshot; detect that and don't | 3948 // These stubs might already be in the snapshot; detect that and don't |
3949 // regenerate them, which would leave the code stub initialization state | 3949 // regenerate them, which would leave the code stub initialization state |
3950 // inconsistent. | 3950 // inconsistent. |
3951 Code* save_doubles_code = NULL; | 3951 Code* save_doubles_code; |
3952 Code* store_buffer_overflow_code = NULL; | 3952 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
3953 if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) { | 3953 save_doubles_code = *save_doubles.GetCode(isolate); |
3954 if (CpuFeatures::IsSupported(FPU)) { | |
3955 CpuFeatures::Scope scope2(FPU); | |
3956 save_doubles_code = *save_doubles.GetCode(isolate); | |
3957 store_buffer_overflow_code = *stub.GetCode(isolate); | |
3958 } else { | |
3959 save_doubles_code = *save_doubles.GetCode(isolate); | |
3960 store_buffer_overflow_code = *stub.GetCode(isolate); | |
3961 } | |
3962 save_doubles_code->set_is_pregenerated(true); | 3954 save_doubles_code->set_is_pregenerated(true); |
| 3955 |
| 3956 Code* store_buffer_overflow_code = *stub.GetCode(isolate); |
3963 store_buffer_overflow_code->set_is_pregenerated(true); | 3957 store_buffer_overflow_code->set_is_pregenerated(true); |
3964 } | 3958 } |
3965 ISOLATE->set_fp_stubs_generated(true); | 3959 isolate->set_fp_stubs_generated(true); |
3966 } | 3960 } |
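[Reviewer note] Nice cleanup: the old GenerateFPStubs branched on FPU support but both arms were identical, so the check, the NULL pre-initialization, and the ISOLATE macro all go away. A stand-alone sketch of the resulting generate-once idiom; Code, FindCodeInCache, and GetCode are hypothetical stand-ins for the stub cache machinery, not V8's real signatures:

  #include <cassert>
  #include <map>
  #include <string>

  struct Code { bool is_pregenerated = false; };
  static std::map<std::string, Code> cache;

  bool FindCodeInCache(const std::string& key, Code** out) {
    auto it = cache.find(key);
    if (it == cache.end()) return false;
    *out = &it->second;
    return true;
  }

  Code* GetCode(const std::string& key) { return &cache[key]; }  // "generates"

  int main() {
    Code* code;
    // First call: nothing cached, so generate and mark as pregenerated.
    if (!FindCodeInCache("CEntryStub_save_doubles", &code)) {
      code = GetCode("CEntryStub_save_doubles");
      code->is_pregenerated = true;
    }
    // Later calls find the cached stub and skip regeneration, so stub
    // initialization state is never clobbered.
    assert(FindCodeInCache("CEntryStub_save_doubles", &code));
    assert(code->is_pregenerated);
    return 0;
  }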
3967 | 3961 |
3968 | 3962 |
3969 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 3963 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
3970 CEntryStub stub(1, kDontSaveFPRegs); | 3964 CEntryStub stub(1, kDontSaveFPRegs); |
3971 Handle<Code> code = stub.GetCode(isolate); | 3965 Handle<Code> code = stub.GetCode(isolate); |
3972 code->set_is_pregenerated(true); | 3966 code->set_is_pregenerated(true); |
3973 } | 3967 } |
3974 | 3968 |
3975 | 3969 |
(...skipping 235 matching lines...)
4211 // a3: argc | 4205 // a3: argc |
4212 // | 4206 // |
4213 // Stack: | 4207 // Stack: |
4214 // 4 args slots | 4208 // 4 args slots |
4215 // args | 4209 // args |
4216 | 4210 |
4217 // Save callee saved registers on the stack. | 4211 // Save callee saved registers on the stack. |
4218 __ MultiPush(kCalleeSaved | ra.bit()); | 4212 __ MultiPush(kCalleeSaved | ra.bit()); |
4219 | 4213 |
4220 if (CpuFeatures::IsSupported(FPU)) { | 4214 if (CpuFeatures::IsSupported(FPU)) { |
4221 CpuFeatures::Scope scope(FPU); | 4215 CpuFeatureScope scope(masm, FPU); |
4222 // Save callee-saved FPU registers. | 4216 // Save callee-saved FPU registers. |
4223 __ MultiPushFPU(kCalleeSavedFPU); | 4217 __ MultiPushFPU(kCalleeSavedFPU); |
4224 // Set up the reserved register for 0.0. | 4218 // Set up the reserved register for 0.0. |
4225 __ Move(kDoubleRegZero, 0.0); | 4219 __ Move(kDoubleRegZero, 0.0); |
4226 } | 4220 } |
4227 | 4221 |
4228 | 4222 |
4229 // Load argv in s0 register. | 4223 // Load argv in s0 register. |
4230 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 4224 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
4231 if (CpuFeatures::IsSupported(FPU)) { | 4225 if (CpuFeatures::IsSupported(FPU)) { |
(...skipping 128 matching lines...)
4360 // Restore the top frame descriptors from the stack. | 4354 // Restore the top frame descriptors from the stack. |
4361 __ pop(t1); | 4355 __ pop(t1); |
4362 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 4356 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
4363 isolate))); | 4357 isolate))); |
4364 __ sw(t1, MemOperand(t0)); | 4358 __ sw(t1, MemOperand(t0)); |
4365 | 4359 |
4366 // Reset the stack to the callee saved registers. | 4360 // Reset the stack to the callee saved registers. |
4367 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); | 4361 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
4368 | 4362 |
4369 if (CpuFeatures::IsSupported(FPU)) { | 4363 if (CpuFeatures::IsSupported(FPU)) { |
4370 CpuFeatures::Scope scope(FPU); | 4364 CpuFeatureScope scope(masm, FPU); |
4371 // Restore callee-saved fpu registers. | 4365 // Restore callee-saved fpu registers. |
4372 __ MultiPopFPU(kCalleeSavedFPU); | 4366 __ MultiPopFPU(kCalleeSavedFPU); |
4373 } | 4367 } |
4374 | 4368 |
4375 // Restore callee saved registers from the stack. | 4369 // Restore callee saved registers from the stack. |
4376 __ MultiPop(kCalleeSaved | ra.bit()); | 4370 __ MultiPop(kCalleeSaved | ra.bit()); |
4377 // Return. | 4371 // Return. |
4378 __ Jump(ra); | 4372 __ Jump(ra); |
4379 } | 4373 } |
4380 | 4374 |
(...skipping 2671 matching lines...)
7052 if (left_ == CompareIC::SMI) { | 7046 if (left_ == CompareIC::SMI) { |
7053 __ JumpIfNotSmi(a1, &miss); | 7047 __ JumpIfNotSmi(a1, &miss); |
7054 } | 7048 } |
7055 if (right_ == CompareIC::SMI) { | 7049 if (right_ == CompareIC::SMI) { |
7056 __ JumpIfNotSmi(a0, &miss); | 7050 __ JumpIfNotSmi(a0, &miss); |
7057 } | 7051 } |
7058 | 7052 |
7059 // Inlining the double comparison and falling back to the general compare | 7053 // Inlining the double comparison and falling back to the general compare |
7060 // stub if NaN is involved or FPU is unsupported. | 7054 // stub if NaN is involved or FPU is unsupported. |
7061 if (CpuFeatures::IsSupported(FPU)) { | 7055 if (CpuFeatures::IsSupported(FPU)) { |
7062 CpuFeatures::Scope scope(FPU); | 7056 CpuFeatureScope scope(masm, FPU); |
7063 | 7057 |
7064 // Load left and right operand. | 7058 // Load left and right operand. |
7065 Label done, left, left_smi, right_smi; | 7059 Label done, left, left_smi, right_smi; |
7066 __ JumpIfSmi(a0, &right_smi); | 7060 __ JumpIfSmi(a0, &right_smi); |
7067 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 7061 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
7068 DONT_DO_SMI_CHECK); | 7062 DONT_DO_SMI_CHECK); |
7069 __ Subu(a2, a0, Operand(kHeapObjectTag)); | 7063 __ Subu(a2, a0, Operand(kHeapObjectTag)); |
7070 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 7064 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
7071 __ Branch(&left); | 7065 __ Branch(&left); |
7072 __ bind(&right_smi); | 7066 __ bind(&right_smi); |
(...skipping 988 matching lines...)
8061 __ Pop(ra, t1, a1); | 8055 __ Pop(ra, t1, a1); |
8062 __ Ret(); | 8056 __ Ret(); |
8063 } | 8057 } |
8064 | 8058 |
8065 | 8059 |
8066 #undef __ | 8060 #undef __ |
8067 | 8061 |
8068 } } // namespace v8::internal | 8062 } } // namespace v8::internal |
8069 | 8063 |
8070 #endif // V8_TARGET_ARCH_MIPS | 8064 #endif // V8_TARGET_ARCH_MIPS |