Chromium Code Reviews

Side by Side Diff: src/mips/code-stubs-mips.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 597 matching lines...)
608 // Set dst1 to 0. 608 // Set dst1 to 0.
609 __ mov(dst1, zero_reg); 609 __ mov(dst1, zero_reg);
610 } 610 }
611 __ bind(&done); 611 __ bind(&done);
612 } 612 }
613 613
614 614
615 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, 615 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
616 Register object, 616 Register object,
617 Destination destination, 617 Destination destination,
618 FPURegister double_dst, 618 DoubleRegister double_dst,
619 Register dst1, 619 Register dst1,
620 Register dst2, 620 Register dst2,
621 Register heap_number_map, 621 Register heap_number_map,
622 Register scratch1, 622 Register scratch1,
623 Register scratch2, 623 Register scratch2,
624 FPURegister single_scratch, 624 FPURegister single_scratch,
625 Label* not_int32) { 625 Label* not_int32) {
626 ASSERT(!scratch1.is(object) && !scratch2.is(object)); 626 ASSERT(!scratch1.is(object) && !scratch2.is(object));
627 ASSERT(!scratch1.is(scratch2)); 627 ASSERT(!scratch1.is(scratch2));
628 ASSERT(!heap_number_map.is(object) && 628 ASSERT(!heap_number_map.is(object) &&
(...skipping 15 matching lines...)
644 "HeapNumberMap register clobbered."); 644 "HeapNumberMap register clobbered.");
645 } 645 }
646 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); 646 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
647 647
648 // Load the number. 648 // Load the number.
649 if (CpuFeatures::IsSupported(FPU)) { 649 if (CpuFeatures::IsSupported(FPU)) {
650 CpuFeatures::Scope scope(FPU); 650 CpuFeatures::Scope scope(FPU);
651 // Load the double value. 651 // Load the double value.
652 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); 652 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
653 653
654 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). 654 Register except_flag = scratch2;
655 // On MIPS a lot of things cannot be implemented the same way so right 655 __ EmitFPUTruncate(kRoundToZero,
656 // now it makes a lot more sense to just do things manually. 656 single_scratch,
657 657 double_dst,
658 // Save FCSR. 658 scratch1,
659 __ cfc1(scratch1, FCSR); 659 except_flag,
660 // Disable FPU exceptions. 660 kCheckForInexactConversion);
661 __ ctc1(zero_reg, FCSR);
662 __ trunc_w_d(single_scratch, double_dst);
663 // Retrieve FCSR.
664 __ cfc1(scratch2, FCSR);
665 // Restore FCSR.
666 __ ctc1(scratch1, FCSR);
667
668 // Check for inexact conversion or exception.
669 __ And(scratch2, scratch2, kFCSRFlagMask);
670 661
671 // Jump to not_int32 if the operation did not succeed. 662 // Jump to not_int32 if the operation did not succeed.
672 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); 663 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
673 664
674 if (destination == kCoreRegisters) { 665 if (destination == kCoreRegisters) {
675 __ Move(dst1, dst2, double_dst); 666 __ Move(dst1, dst2, double_dst);
676 } 667 }
677 668
678 } else { 669 } else {
679 ASSERT(!scratch1.is(object) && !scratch2.is(object)); 670 ASSERT(!scratch1.is(object) && !scratch2.is(object));
680 // Load the double value in the destination registers. 671 // Load the double value in the destination registers.
681 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); 672 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
682 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); 673 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
(...skipping 16 matching lines...)
699 } 690 }
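Note on the change above: the EmitFPUTruncate call folds the old hand-rolled sequence (save FCSR, clear it, trunc_w_d, re-read FCSR, mask with kFCSRFlagMask) into one macro that reports failure through except_flag. As a hedged, portable-C++ sketch of the semantics only (TruncateToInt32 is a hypothetical helper, not the V8 macro): the conversion succeeds when the value truncates to an exact, in-range int32.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: truncate toward zero, succeed only if the result is an
// exact, in-range int32 (the condition signalled via except_flag in the stub).
static bool TruncateToInt32(double value, int32_t* result) {
  double truncated = std::trunc(value);  // kRoundToZero
  if (std::isnan(value) ||
      truncated < -2147483648.0 ||
      truncated > 2147483647.0 ||
      truncated != value) {              // inexact or out of range -> not_int32
    return false;
  }
  *result = static_cast<int32_t>(truncated);
  return true;
}

int main() {
  int32_t out = 0;
  std::printf("%d %d\n", TruncateToInt32(42.0, &out), TruncateToInt32(42.5, &out));  // 1 0
}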
700 691
701 692
702 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, 693 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
703 Register object, 694 Register object,
704 Register dst, 695 Register dst,
705 Register heap_number_map, 696 Register heap_number_map,
706 Register scratch1, 697 Register scratch1,
707 Register scratch2, 698 Register scratch2,
708 Register scratch3, 699 Register scratch3,
709 FPURegister double_scratch, 700 DoubleRegister double_scratch,
710 Label* not_int32) { 701 Label* not_int32) {
711 ASSERT(!dst.is(object)); 702 ASSERT(!dst.is(object));
712 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); 703 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
713 ASSERT(!scratch1.is(scratch2) && 704 ASSERT(!scratch1.is(scratch2) &&
714 !scratch1.is(scratch3) && 705 !scratch1.is(scratch3) &&
715 !scratch2.is(scratch3)); 706 !scratch2.is(scratch3));
716 707
717 Label done; 708 Label done;
718 709
719 // Untag the object into the destination register. 710 // Untag the object into the destination register.
720 __ SmiUntag(dst, object); 711 __ SmiUntag(dst, object);
721 // Just return if the object is a smi. 712 // Just return if the object is a smi.
722 __ JumpIfSmi(object, &done); 713 __ JumpIfSmi(object, &done);
723 714
724 if (FLAG_debug_code) { 715 if (FLAG_debug_code) {
725 __ AbortIfNotRootValue(heap_number_map, 716 __ AbortIfNotRootValue(heap_number_map,
726 Heap::kHeapNumberMapRootIndex, 717 Heap::kHeapNumberMapRootIndex,
727 "HeapNumberMap register clobbered."); 718 "HeapNumberMap register clobbered.");
728 } 719 }
729 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); 720 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
730 721
731 // Object is a heap number. 722 // Object is a heap number.
732 // Convert the floating point value to a 32-bit integer. 723 // Convert the floating point value to a 32-bit integer.
733 if (CpuFeatures::IsSupported(FPU)) { 724 if (CpuFeatures::IsSupported(FPU)) {
734 CpuFeatures::Scope scope(FPU); 725 CpuFeatures::Scope scope(FPU);
735 // Load the double value. 726 // Load the double value.
736 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); 727 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
737 728
738 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). 729 FPURegister single_scratch = double_scratch.low();
739 // On MIPS a lot of things cannot be implemented the same way so right 730 Register except_flag = scratch2;
740 // now it makes a lot more sense to just do things manually. 731 __ EmitFPUTruncate(kRoundToZero,
741 732 single_scratch,
742 // Save FCSR. 733 double_scratch,
743 __ cfc1(scratch1, FCSR); 734 scratch1,
744 // Disable FPU exceptions. 735 except_flag,
745 __ ctc1(zero_reg, FCSR); 736 kCheckForInexactConversion);
746 __ trunc_w_d(double_scratch, double_scratch);
747 // Retrieve FCSR.
748 __ cfc1(scratch2, FCSR);
749 // Restore FCSR.
750 __ ctc1(scratch1, FCSR);
751
752 // Check for inexact conversion or exception.
753 __ And(scratch2, scratch2, kFCSRFlagMask);
754 737
755 // Jump to not_int32 if the operation did not succeed. 738 // Jump to not_int32 if the operation did not succeed.
756 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); 739 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
757 // Get the result in the destination register. 740 // Get the result in the destination register.
758 __ mfc1(dst, double_scratch); 741 __ mfc1(dst, single_scratch);
759 742
760 } else { 743 } else {
761 // Load the double value in the destination registers. 744 // Load the double value in the destination registers.
762 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); 745 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
763 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); 746 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
764 747
765 // Check for 0 and -0. 748 // Check for 0 and -0.
766 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask)); 749 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
767 __ Or(dst, scratch2, Operand(dst)); 750 __ Or(dst, scratch2, Operand(dst));
768 __ Branch(&done, eq, dst, Operand(zero_reg)); 751 __ Branch(&done, eq, dst, Operand(zero_reg));
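The non-FPU path above detects +0/-0 by OR-ing the mantissa word with the sign-masked exponent word. A hedged standalone illustration of the same bit test (not the V8 code):

#include <cstdint>
#include <cstring>
#include <cstdio>

// A double is +0 or -0 exactly when every bit except the sign bit is clear.
static bool IsZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));      // reinterpret without UB
  return (bits & ~(UINT64_C(1) << 63)) == 0;     // ignore the sign bit (HeapNumber::kSignMask)
}

int main() {
  std::printf("%d %d %d\n", IsZero(0.0), IsZero(-0.0), IsZero(1e-300));  // 1 1 0
}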
(...skipping 105 matching lines...)
874 if (!IsMipsSoftFloatABI) { 857 if (!IsMipsSoftFloatABI) {
875 CpuFeatures::Scope scope(FPU); 858 CpuFeatures::Scope scope(FPU);
876 // We are not using MIPS FPU instructions, and parameters for the runtime 859 // We are not using MIPS FPU instructions, and parameters for the runtime
877 // function call are prepared in a0-a3 registers, but the function we are 860 // function call are prepared in a0-a3 registers, but the function we are
878 // calling is compiled with hard-float flag and expecting hard float ABI 861 // calling is compiled with hard-float flag and expecting hard float ABI
879 // (parameters in f12/f14 registers). We need to copy parameters from 862 // (parameters in f12/f14 registers). We need to copy parameters from
880 // a0-a3 registers to f12/f14 register pairs. 863 // a0-a3 registers to f12/f14 register pairs.
881 __ Move(f12, a0, a1); 864 __ Move(f12, a0, a1);
882 __ Move(f14, a2, a3); 865 __ Move(f14, a2, a3);
883 } 866 }
884 // Call C routine that may not cause GC or other trouble. 867 {
885 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), 868 AllowExternalCallThatCantCauseGC scope(masm);
886 4); 869 __ CallCFunction(
870 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
871 }
887 // Store answer in the overwritable heap number. 872 // Store answer in the overwritable heap number.
888 if (!IsMipsSoftFloatABI) { 873 if (!IsMipsSoftFloatABI) {
889 CpuFeatures::Scope scope(FPU); 874 CpuFeatures::Scope scope(FPU);
890 // Double returned in register f0. 875 // Double returned in register f0.
891 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); 876 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
892 } else { 877 } else {
893 // Double returned in registers v0 and v1. 878 // Double returned in registers v0 and v1.
894 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); 879 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
895 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); 880 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
896 } 881 }
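For reference, under the MIPS o32 soft-float ABI each double argument arrives as two 32-bit words, which is why the code above copies a0/a1 and a2/a3 into f12/f14 before calling the hard-float C function, and why CallCFunction is now told it passes 0 integer and 2 double arguments. A hedged sketch of the word pairing, assuming a little-endian target (CombineWords is a hypothetical helper, not V8 code):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical helper: rebuild a double from the two 32-bit words it occupies
// in a0 (low) and a1 (high) under the o32 soft-float ABI, little-endian assumed.
static double CombineWords(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  // 1.5 has the bit pattern 0x3FF8000000000000.
  std::printf("%g\n", CombineWords(0x00000000u, 0x3FF80000u));  // 1.5
}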
(...skipping 354 matching lines...)
1251 1236
1252 __ bind(&return_result_equal); 1237 __ bind(&return_result_equal);
1253 __ li(v0, Operand(EQUAL)); 1238 __ li(v0, Operand(EQUAL));
1254 __ Ret(); 1239 __ Ret();
1255 } 1240 }
1256 1241
1257 __ bind(&return_result_not_equal); 1242 __ bind(&return_result_not_equal);
1258 1243
1259 if (!CpuFeatures::IsSupported(FPU)) { 1244 if (!CpuFeatures::IsSupported(FPU)) {
1260 __ push(ra); 1245 __ push(ra);
1261 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. 1246 __ PrepareCallCFunction(0, 2, t4);
1262 if (!IsMipsSoftFloatABI) { 1247 if (!IsMipsSoftFloatABI) {
1263 // We are not using MIPS FPU instructions, and parameters for the runtime 1248 // We are not using MIPS FPU instructions, and parameters for the runtime
1264 // function call are prepared in a0-a3 registers, but the function we are 1249 // function call are prepared in a0-a3 registers, but the function we are
1265 // calling is compiled with hard-float flag and expecting hard float ABI 1250 // calling is compiled with hard-float flag and expecting hard float ABI
1266 // (parameters in f12/f14 registers). We need to copy parameters from 1251 // (parameters in f12/f14 registers). We need to copy parameters from
1267 // a0-a3 registers to f12/f14 register pairs. 1252 // a0-a3 registers to f12/f14 register pairs.
1268 __ Move(f12, a0, a1); 1253 __ Move(f12, a0, a1);
1269 __ Move(f14, a2, a3); 1254 __ Move(f14, a2, a3);
1270 } 1255 }
1271 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); 1256 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1257 0, 2);
1272 __ pop(ra); // Because this function returns int, result is in v0. 1258 __ pop(ra); // Because this function returns int, result is in v0.
1273 __ Ret(); 1259 __ Ret();
1274 } else { 1260 } else {
1275 CpuFeatures::Scope scope(FPU); 1261 CpuFeatures::Scope scope(FPU);
1276 Label equal, less_than; 1262 Label equal, less_than;
1277 __ c(EQ, D, f12, f14); 1263 __ BranchF(&equal, NULL, eq, f12, f14);
1278 __ bc1t(&equal); 1264 __ BranchF(&less_than, NULL, lt, f12, f14);
1279 __ nop();
1280
1281 __ c(OLT, D, f12, f14);
1282 __ bc1t(&less_than);
1283 __ nop();
1284 1265
1285 // Not equal, not less, not NaN, must be greater. 1266 // Not equal, not less, not NaN, must be greater.
1286 __ li(v0, Operand(GREATER)); 1267 __ li(v0, Operand(GREATER));
1287 __ Ret(); 1268 __ Ret();
1288 1269
1289 __ bind(&equal); 1270 __ bind(&equal);
1290 __ li(v0, Operand(EQUAL)); 1271 __ li(v0, Operand(EQUAL));
1291 __ Ret(); 1272 __ Ret();
1292 1273
1293 __ bind(&less_than); 1274 __ bind(&less_than);
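The BranchF macro used above replaces the old c()/bc1t()/nop() triple, including the explicit branch-delay-slot fill. What the sequence as a whole computes is a three-way compare from which NaN has already been excluded; a hedged C++ rendering of that logic (not the stub itself):

#include <cstdio>

enum { kLess = -1, kEqual = 0, kGreater = 1 };  // mirrors the LESS/EQUAL/GREATER constants

// Three-way compare; the stub reaches this point only after NaN was ruled out.
static int CompareDoubles(double lhs, double rhs) {
  if (lhs == rhs) return kEqual;
  if (lhs < rhs) return kLess;
  return kGreater;   // not equal, not less -> greater
}

int main() {
  std::printf("%d %d %d\n", CompareDoubles(1, 2), CompareDoubles(2, 2), CompareDoubles(3, 2));
}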
(...skipping 172 matching lines...)
1466 // of two pointer sized fields. 1447 // of two pointer sized fields.
1467 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); 1448 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1468 __ Addu(scratch1, number_string_cache, scratch1); 1449 __ Addu(scratch1, number_string_cache, scratch1);
1469 1450
1470 Register probe = mask; 1451 Register probe = mask;
1471 __ lw(probe, 1452 __ lw(probe,
1472 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); 1453 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1473 __ JumpIfSmi(probe, not_found); 1454 __ JumpIfSmi(probe, not_found);
1474 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); 1455 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1475 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); 1456 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1476 __ c(EQ, D, f12, f14); 1457 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1477 __ bc1t(&load_result_from_cache);
1478 __ nop(); // bc1t() requires explicit fill of branch delay slot.
1479 __ Branch(not_found); 1458 __ Branch(not_found);
1480 } else { 1459 } else {
1481 // Note that there is no cache check for non-FPU case, even though 1460 // Note that there is no cache check for non-FPU case, even though
1482 // it seems there could be. May be a tiny optimization for non-FPU 1461 // it seems there could be. May be a tiny optimization for non-FPU
1483 // cores. 1462 // cores.
1484 __ Branch(not_found); 1463 __ Branch(not_found);
1485 } 1464 }
1486 } 1465 }
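The lookup above scales the masked hash by kPointerSizeLog2 + 1 because each cache entry is a [number, string] pair of pointer-sized fields and the stub needs a byte offset. A hedged, simplified sketch of such a direct-mapped cache (flat array of key/value slots; names and hashing are illustrative, not the V8 layout):

#include <cstdint>
#include <cstdio>

// Flat cache: entry i occupies slots[2 * i] (key bits) and slots[2 * i + 1] (value).
static const uint32_t kCacheEntries = 64;   // power of two, so (hash & mask) works
static uint64_t slots[kCacheEntries * 2];

static uint32_t SlotIndex(uint64_t key_bits) {
  uint32_t hash = static_cast<uint32_t>(key_bits) ^ static_cast<uint32_t>(key_bits >> 32);
  // The stub shifts by kPointerSizeLog2 + 1 because it needs a byte offset;
  // here a plain element index is enough, so the scale is just << 1.
  return (hash & (kCacheEntries - 1)) << 1;
}

static uint64_t* Lookup(uint64_t key_bits) {
  uint32_t i = SlotIndex(key_bits);
  return slots[i] == key_bits ? &slots[i + 1] : nullptr;   // miss -> not_found
}

int main() {
  const uint64_t bits_of_1_5 = 0x3FF8000000000000ull;
  uint32_t i = SlotIndex(bits_of_1_5);
  slots[i] = bits_of_1_5;
  slots[i + 1] = 42;   // stands in for the cached string pointer
  std::printf("%u\n", Lookup(bits_of_1_5) ? static_cast<unsigned>(*Lookup(bits_of_1_5)) : 0u);
}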
1487 1466
1488 __ bind(&is_smi); 1467 __ bind(&is_smi);
(...skipping 95 matching lines...)
1584 1563
1585 Isolate* isolate = masm->isolate(); 1564 Isolate* isolate = masm->isolate();
1586 if (CpuFeatures::IsSupported(FPU)) { 1565 if (CpuFeatures::IsSupported(FPU)) {
1587 CpuFeatures::Scope scope(FPU); 1566 CpuFeatures::Scope scope(FPU);
1588 Label nan; 1567 Label nan;
1589 __ li(t0, Operand(LESS)); 1568 __ li(t0, Operand(LESS));
1590 __ li(t1, Operand(GREATER)); 1569 __ li(t1, Operand(GREATER));
1591 __ li(t2, Operand(EQUAL)); 1570 __ li(t2, Operand(EQUAL));
1592 1571
1593 // Check if either rhs or lhs is NaN. 1572 // Check if either rhs or lhs is NaN.
1594 __ c(UN, D, f12, f14); 1573 __ BranchF(NULL, &nan, eq, f12, f14);
1595 __ bc1t(&nan);
1596 __ nop();
1597 1574
1598 // Check if LESS condition is satisfied. If true, move conditionally 1575 // Check if LESS condition is satisfied. If true, move conditionally
1599 // result to v0. 1576 // result to v0.
1600 __ c(OLT, D, f12, f14); 1577 __ c(OLT, D, f12, f14);
1601 __ movt(v0, t0); 1578 __ movt(v0, t0);
1602 // Use previous check to store conditionally to v0 opposite condition 1579 // Use previous check to store conditionally to v0 opposite condition
1603 // (GREATER). If rhs is equal to lhs, this will be corrected in next 1580 // (GREATER). If rhs is equal to lhs, this will be corrected in next
1604 // check. 1581 // check.
1605 __ movf(v0, t1); 1582 __ movf(v0, t1);
1606 // Check if EQUAL condition is satisfied. If true, move conditionally 1583 // Check if EQUAL condition is satisfied. If true, move conditionally
(...skipping 97 matching lines...)
1704 __ li(a0, Operand(Smi::FromInt(ncr))); 1681 __ li(a0, Operand(Smi::FromInt(ncr)));
1705 __ push(a0); 1682 __ push(a0);
1706 } 1683 }
1707 1684
1708 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 1685 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1709 // tagged as a small integer. 1686 // tagged as a small integer.
1710 __ InvokeBuiltin(native, JUMP_FUNCTION); 1687 __ InvokeBuiltin(native, JUMP_FUNCTION);
1711 } 1688 }
1712 1689
1713 1690
1714 // The stub returns zero for false, and a non-zero value for true. 1691 // The stub expects its argument in the tos_ register and returns its result in
1692 // it, too: zero for false, and a non-zero value for true.
1715 void ToBooleanStub::Generate(MacroAssembler* masm) { 1693 void ToBooleanStub::Generate(MacroAssembler* masm) {
1716 // This stub uses FPU instructions. 1694 // This stub uses FPU instructions.
1717 CpuFeatures::Scope scope(FPU); 1695 CpuFeatures::Scope scope(FPU);
1718 1696
1719 Label false_result; 1697 Label patch;
1720 Label not_heap_number; 1698 const Register map = t5.is(tos_) ? t3 : t5;
1721 Register scratch0 = t5.is(tos_) ? t3 : t5;
1722 1699
1723 // undefined -> false 1700 // undefined -> false.
1724 __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); 1701 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1725 __ Branch(&false_result, eq, tos_, Operand(scratch0));
1726 1702
1727 // Boolean -> its value 1703 // Boolean -> its value.
1728 __ LoadRoot(scratch0, Heap::kFalseValueRootIndex); 1704 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1729 __ Branch(&false_result, eq, tos_, Operand(scratch0)); 1705 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1730 __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
1731 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1732 // return true if the equal condition is satisfied.
1733 __ Ret(eq, tos_, Operand(scratch0));
1734 1706
1735 // Smis: 0 -> false, all other -> true 1707 // 'null' -> false.
1736 __ And(scratch0, tos_, tos_); 1708 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1737 __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
1738 __ And(scratch0, tos_, Operand(kSmiTagMask));
1739 // "tos_" is a register and contains a non-zero value. Hence we implicitly
1740 // return true if the not equal condition is satisfied.
1741 __ Ret(eq, scratch0, Operand(zero_reg));
1742 1709
1743 // 'null' -> false 1710 if (types_.Contains(SMI)) {
1744 __ LoadRoot(scratch0, Heap::kNullValueRootIndex); 1711 // Smis: 0 -> false, all other -> true
1745 __ Branch(&false_result, eq, tos_, Operand(scratch0)); 1712 __ And(at, tos_, kSmiTagMask);
1713 // tos_ contains the correct return value already
1714 __ Ret(eq, at, Operand(zero_reg));
1715 } else if (types_.NeedsMap()) {
1716 // If we need a map later and have a Smi -> patch.
1717 __ JumpIfSmi(tos_, &patch);
1718 }
1746 1719
1747 // HeapNumber => false if +0, -0, or NaN. 1720 if (types_.NeedsMap()) {
1748 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1721 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1749 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1750 __ Branch(&not_heap_number, ne, scratch0, Operand(at));
1751 1722
1752 __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset)); 1723 if (types_.CanBeUndetectable()) {
1753 __ fcmp(f12, 0.0, UEQ); 1724 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1725 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1726 // Undetectable -> false.
1727 __ movn(tos_, zero_reg, at);
1728 __ Ret(ne, at, Operand(zero_reg));
1729 }
1730 }
1754 1731
1755 // "tos_" is a register, and contains a non zero value by default. 1732 if (types_.Contains(SPEC_OBJECT)) {
1756 // Hence we only need to overwrite "tos_" with zero to return false for 1733 // Spec object -> true.
1757 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. 1734 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1758 __ movt(tos_, zero_reg); 1735 // tos_ contains the correct non-zero return value already.
1759 __ Ret(); 1736 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1737 }
1760 1738
1761 __ bind(&not_heap_number); 1739 if (types_.Contains(STRING)) {
1740 // String value -> false iff empty.
1741 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1742 Label skip;
1743 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1744 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1745 __ Ret(); // the string length is OK as the return value
1746 __ bind(&skip);
1747 }
1762 1748
1763 // It can be an undetectable object. 1749 if (types_.Contains(HEAP_NUMBER)) {
1764 // Undetectable => false. 1750 // Heap number -> false iff +0, -0, or NaN.
1765 __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1751 Label not_heap_number;
1766 __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset)); 1752 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1767 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); 1753 __ Branch(&not_heap_number, ne, map, Operand(at));
1768 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); 1754 Label zero_or_nan, number;
1755 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1756 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1757 // "tos_" is a register, and contains a non zero value by default.
1758 // Hence we only need to overwrite "tos_" with zero to return false for
1759 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1760 __ bind(&zero_or_nan);
1761 __ mov(tos_, zero_reg);
1762 __ bind(&number);
1763 __ Ret();
1764 __ bind(&not_heap_number);
1765 }
1769 1766
1770 // JavaScript object => true. 1767 __ bind(&patch);
1771 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1768 GenerateTypeTransition(masm);
1772 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1773
1774 // "tos_" is a register and contains a non-zero value.
1775 // Hence we implicitly return true if the greater than
1776 // condition is satisfied.
1777 __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
1778
1779 // Check for string.
1780 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1781 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1782 // "tos_" is a register and contains a non-zero value.
1783 // Hence we implicitly return true if the greater than
1784 // condition is satisfied.
1785 __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
1786
1787 // String value => false iff empty, i.e., length is zero.
1788 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1789 // If length is zero, "tos_" contains zero ==> false.
1790 // If length is not zero, "tos_" contains a non-zero value ==> true.
1791 __ Ret();
1792
1793 // Return 0 in "tos_" for false.
1794 __ bind(&false_result);
1795 __ mov(tos_, zero_reg);
1796 __ Ret();
1797 } 1769 }
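The rewritten stub above checks only the types recorded in types_ and falls back to GenerateTypeTransition (the &patch label) for anything else. For orientation, a hedged reference sketch of the ToBoolean semantics those per-type checks implement, over a toy value model (not V8's types):

#include <cmath>
#include <string>
#include <cstdio>

enum class JSType { kUndefined, kNull, kBoolean, kSmi, kHeapNumber, kString, kSpecObject };

struct JSValue {
  JSType type;
  bool boolean;         // used for kBoolean
  double number;        // used for kSmi / kHeapNumber
  std::string string;   // used for kString
};

static bool ToBoolean(const JSValue& v) {
  switch (v.type) {
    case JSType::kUndefined:
    case JSType::kNull:        return false;
    case JSType::kBoolean:     return v.boolean;
    case JSType::kSmi:
    case JSType::kHeapNumber:  return v.number != 0 && !std::isnan(v.number);  // +0, -0, NaN -> false
    case JSType::kString:      return !v.string.empty();                       // empty string -> false
    case JSType::kSpecObject:  return true;  // undetectable objects are handled separately in the stub
  }
  return false;
}

int main() {
  std::printf("%d %d\n",
              ToBoolean({JSType::kHeapNumber, false, -0.0}),      // 0
              ToBoolean({JSType::kString, false, 0.0, "x"}));     // 1
}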
1798 1770
1799 1771
1772 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1773 Type type,
1774 Heap::RootListIndex value,
1775 bool result) {
1776 if (types_.Contains(type)) {
1777 // If we see an expected oddball, return its ToBoolean value tos_.
1778 __ LoadRoot(at, value);
1779 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1780 // The value of a root is never NULL, so we can avoid loading a non-null
1781 // value into tos_ when we want to return 'true'.
1782 if (!result) {
1783 __ movz(tos_, zero_reg, at);
1784 }
1785 __ Ret(eq, at, Operand(zero_reg));
1786 }
1787 }
1788
1789
1790 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1791 __ Move(a3, tos_);
1792 __ li(a2, Operand(Smi::FromInt(tos_.code())));
1793 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1794 __ Push(a3, a2, a1);
1795 // Patch the caller to an appropriate specialized stub and return the
1796 // operation result to the caller of the stub.
1797 __ TailCallExternalReference(
1798 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1799 3,
1800 1);
1801 }
1802
1803
1800 void UnaryOpStub::PrintName(StringStream* stream) { 1804 void UnaryOpStub::PrintName(StringStream* stream) {
1801 const char* op_name = Token::Name(op_); 1805 const char* op_name = Token::Name(op_);
1802 const char* overwrite_name = NULL; // Make g++ happy. 1806 const char* overwrite_name = NULL; // Make g++ happy.
1803 switch (mode_) { 1807 switch (mode_) {
1804 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; 1808 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1805 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; 1809 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1806 } 1810 }
1807 stream->Add("UnaryOpStub_%s_%s_%s", 1811 stream->Add("UnaryOpStub_%s_%s_%s",
1808 op_name, 1812 op_name,
1809 overwrite_name, 1813 overwrite_name,
(...skipping 134 matching lines...)
1944 if (mode_ == UNARY_OVERWRITE) { 1948 if (mode_ == UNARY_OVERWRITE) {
1945 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1949 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1946 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. 1950 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1947 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1951 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1948 } else { 1952 } else {
1949 Label slow_allocate_heapnumber, heapnumber_allocated; 1953 Label slow_allocate_heapnumber, heapnumber_allocated;
1950 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); 1954 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
1951 __ jmp(&heapnumber_allocated); 1955 __ jmp(&heapnumber_allocated);
1952 1956
1953 __ bind(&slow_allocate_heapnumber); 1957 __ bind(&slow_allocate_heapnumber);
1954 __ EnterInternalFrame(); 1958 {
1955 __ push(a0); 1959 FrameScope scope(masm, StackFrame::INTERNAL);
1956 __ CallRuntime(Runtime::kNumberAlloc, 0); 1960 __ push(a0);
1957 __ mov(a1, v0); 1961 __ CallRuntime(Runtime::kNumberAlloc, 0);
1958 __ pop(a0); 1962 __ mov(a1, v0);
1959 __ LeaveInternalFrame(); 1963 __ pop(a0);
1964 }
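The FrameScope block above replaces the explicit EnterInternalFrame/LeaveInternalFrame pair; presumably the scope object enters the internal frame in its constructor and leaves it in its destructor, so the frame stays balanced even if the block is later restructured. A minimal RAII analogue (illustrative only, not the V8 class):

#include <cstdio>

// Illustrative only: enter the frame in the constructor, leave it in the destructor.
class InternalFrameScope {
 public:
  InternalFrameScope()  { std::puts("enter internal frame"); }
  ~InternalFrameScope() { std::puts("leave internal frame"); }
};

int main() {
  {
    InternalFrameScope scope;
    std::puts("... push a0, call Runtime::kNumberAlloc, pop a0 ...");
  }  // frame is left here, at the closing brace, matching the new code shape
}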
1960 1965
1961 __ bind(&heapnumber_allocated); 1966 __ bind(&heapnumber_allocated);
1962 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); 1967 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1963 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); 1968 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1964 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); 1969 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
1965 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. 1970 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
1966 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); 1971 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
1967 __ mov(v0, a1); 1972 __ mov(v0, a1);
1968 } 1973 }
1969 __ Ret(); 1974 __ Ret();
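Unary minus on an already-allocated heap number is performed above by XOR-ing HeapNumber::kSignMask into the exponent word. A hedged bit-level illustration in portable C++ (NegateByBit is a hypothetical helper):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical helper: flip the IEEE-754 sign bit, the bit HeapNumber::kSignMask selects.
static double NegateByBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= UINT64_C(1) << 63;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  std::printf("%g %g\n", NegateByBit(1.25), NegateByBit(-0.0));  // -1.25 0
}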
(...skipping 21 matching lines...)
1991 1996
1992 // Try to store the result in a heap number. 1997 // Try to store the result in a heap number.
1993 __ bind(&try_float); 1998 __ bind(&try_float);
1994 if (mode_ == UNARY_NO_OVERWRITE) { 1999 if (mode_ == UNARY_NO_OVERWRITE) {
1995 Label slow_allocate_heapnumber, heapnumber_allocated; 2000 Label slow_allocate_heapnumber, heapnumber_allocated;
1996 // Allocate a new heap number without zapping v0, which we need if it fails. 2001 // Allocate a new heap number without zapping v0, which we need if it fails.
1997 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber); 2002 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
1998 __ jmp(&heapnumber_allocated); 2003 __ jmp(&heapnumber_allocated);
1999 2004
2000 __ bind(&slow_allocate_heapnumber); 2005 __ bind(&slow_allocate_heapnumber);
2001 __ EnterInternalFrame(); 2006 {
2002 __ push(v0); // Push the heap number, not the untagged int32. 2007 FrameScope scope(masm, StackFrame::INTERNAL);
2003 __ CallRuntime(Runtime::kNumberAlloc, 0); 2008 __ push(v0); // Push the heap number, not the untagged int32.
2004 __ mov(a2, v0); // Move the new heap number into a2. 2009 __ CallRuntime(Runtime::kNumberAlloc, 0);
2005 // Get the heap number into v0, now that the new heap number is in a2. 2010 __ mov(a2, v0); // Move the new heap number into a2.
2006 __ pop(v0); 2011 // Get the heap number into v0, now that the new heap number is in a2.
2007 __ LeaveInternalFrame(); 2012 __ pop(v0);
2013 }
2008 2014
2009 // Convert the heap number in v0 to an untagged integer in a1. 2015 // Convert the heap number in v0 to an untagged integer in a1.
2010 // This can't go slow-case because it's the same number we already 2016 // This can't go slow-case because it's the same number we already
2011 // converted once again. 2017 // converted once again.
2012 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); 2018 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2013 // Negate the result. 2019 // Negate the result.
2014 __ Xor(a1, a1, -1); 2020 __ Xor(a1, a1, -1);
2015 2021
2016 __ bind(&heapnumber_allocated); 2022 __ bind(&heapnumber_allocated);
2017 __ mov(v0, a2); // Move newly allocated heap number to v0. 2023 __ mov(v0, a2); // Move newly allocated heap number to v0.
(...skipping 692 matching lines...)
2710 default: 2716 default:
2711 UNREACHABLE(); 2717 UNREACHABLE();
2712 } 2718 }
2713 2719
2714 if (op_ != Token::DIV) { 2720 if (op_ != Token::DIV) {
2715 // These operations produce an integer result. 2721 // These operations produce an integer result.
2716 // Try to return a smi if we can. 2722 // Try to return a smi if we can.
2717 // Otherwise return a heap number if allowed, or jump to type 2723 // Otherwise return a heap number if allowed, or jump to type
2718 // transition. 2724 // transition.
2719 2725
2720 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). 2726 Register except_flag = scratch2;
2721 // On MIPS a lot of things cannot be implemented the same way so right 2727 __ EmitFPUTruncate(kRoundToZero,
2722 // now it makes a lot more sense to just do things manually. 2728 single_scratch,
2723 2729 f10,
2724 // Save FCSR. 2730 scratch1,
2725 __ cfc1(scratch1, FCSR); 2731 except_flag);
2726 // Disable FPU exceptions.
2727 __ ctc1(zero_reg, FCSR);
2728 __ trunc_w_d(single_scratch, f10);
2729 // Retrieve FCSR.
2730 __ cfc1(scratch2, FCSR);
2731 // Restore FCSR.
2732 __ ctc1(scratch1, FCSR);
2733
2734 // Check for inexact conversion or exception.
2735 __ And(scratch2, scratch2, kFCSRFlagMask);
2736 2732
2737 if (result_type_ <= BinaryOpIC::INT32) { 2733 if (result_type_ <= BinaryOpIC::INT32) {
2738 // If scratch2 != 0, result does not fit in a 32-bit integer. 2734 // If except_flag != 0, result does not fit in a 32-bit integer.
2739 __ Branch(&transition, ne, scratch2, Operand(zero_reg)); 2735 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2740 } 2736 }
2741 2737
2742 // Check if the result fits in a smi. 2738 // Check if the result fits in a smi.
2743 __ mfc1(scratch1, single_scratch); 2739 __ mfc1(scratch1, single_scratch);
2744 __ Addu(scratch2, scratch1, Operand(0x40000000)); 2740 __ Addu(scratch2, scratch1, Operand(0x40000000));
2745 // If not try to return a heap number. 2741 // If not try to return a heap number.
2746 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); 2742 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2747 // Check for minus zero. Return heap number for minus zero. 2743 // Check for minus zero. Return heap number for minus zero.
2748 Label not_zero; 2744 Label not_zero;
2749 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg)); 2745 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
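The "Addu 0x40000000; branch if the result is negative" idiom above tests whether the truncated value fits in a 31-bit smi, i.e. lies in [-2^30, 2^30). A hedged sketch of the same trick (FitsInSmi is a hypothetical helper):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: a 32-bit value fits in a 31-bit smi iff it is in [-2^30, 2^30).
static bool FitsInSmi(int32_t value) {
  // Adding 2^30 flips the sign bit exactly for values outside the smi range,
  // which is what the 'Branch(..., lt, scratch2, Operand(zero_reg))' above detects.
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}

int main() {
  std::printf("%d %d %d\n", FitsInSmi(0), FitsInSmi((1 << 30) - 1), FitsInSmi(1 << 30));  // 1 1 0
}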
(...skipping 468 matching lines...)
3218 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12]. 3214 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3219 __ sll(t0, a1, 1); 3215 __ sll(t0, a1, 1);
3220 __ Addu(a1, a1, t0); 3216 __ Addu(a1, a1, t0);
3221 __ sll(t0, a1, 2); 3217 __ sll(t0, a1, 2);
3222 __ Addu(cache_entry, cache_entry, t0); 3218 __ Addu(cache_entry, cache_entry, t0);
3223 3219
3224 // Check if cache matches: Double value is stored in uint32_t[2] array. 3220 // Check if cache matches: Double value is stored in uint32_t[2] array.
3225 __ lw(t0, MemOperand(cache_entry, 0)); 3221 __ lw(t0, MemOperand(cache_entry, 0));
3226 __ lw(t1, MemOperand(cache_entry, 4)); 3222 __ lw(t1, MemOperand(cache_entry, 4));
3227 __ lw(t2, MemOperand(cache_entry, 8)); 3223 __ lw(t2, MemOperand(cache_entry, 8));
3228 __ Addu(cache_entry, cache_entry, 12);
3229 __ Branch(&calculate, ne, a2, Operand(t0)); 3224 __ Branch(&calculate, ne, a2, Operand(t0));
3230 __ Branch(&calculate, ne, a3, Operand(t1)); 3225 __ Branch(&calculate, ne, a3, Operand(t1));
3231 // Cache hit. Load result, cleanup and return. 3226 // Cache hit. Load result, cleanup and return.
3232 if (tagged) { 3227 if (tagged) {
3233 // Pop input value from stack and load result into v0. 3228 // Pop input value from stack and load result into v0.
3234 __ Drop(1); 3229 __ Drop(1);
3235 __ mov(v0, t2); 3230 __ mov(v0, t2);
3236 } else { 3231 } else {
3237 // Load result into f4. 3232 // Load result into f4.
3238 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); 3233 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
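Each transcendental-cache element read above holds the two 32-bit halves of the input double plus a result pointer, i.e. 12 bytes, and the address arithmetic (shift by 1, add, shift by 2) computes i*12 without a multiply. A hedged sketch of that offset computation (layout simplified, not the V8 struct):

#include <cstdint>
#include <cstdio>

struct CacheElement {   // 12 bytes: two input words plus a result pointer word
  uint32_t in0;
  uint32_t in1;
  uint32_t output;
};

static uint32_t ByteOffset(uint32_t index) {
  uint32_t t0 = index << 1;   // i * 2   (sll t0, a1, 1)
  uint32_t i3 = index + t0;   // i * 3   (addu a1, a1, t0)
  return i3 << 2;             // i * 12  (sll t0, a1, 2)
}

int main() {
  std::printf("%u %u\n", ByteOffset(5), static_cast<unsigned>(sizeof(CacheElement) * 5));  // 60 60
}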
(...skipping 13 matching lines...)
3252 CpuFeatures::Scope scope(FPU); 3247 CpuFeatures::Scope scope(FPU);
3253 3248
3254 Label no_update; 3249 Label no_update;
3255 Label skip_cache; 3250 Label skip_cache;
3256 const Register heap_number_map = t2; 3251 const Register heap_number_map = t2;
3257 3252
3258 // Call C function to calculate the result and update the cache. 3253 // Call C function to calculate the result and update the cache.
3259 // Register a0 holds precalculated cache entry address; preserve 3254 // Register a0 holds precalculated cache entry address; preserve
3260 // it on the stack and pop it into register cache_entry after the 3255 // it on the stack and pop it into register cache_entry after the
3261 // call. 3256 // call.
3262 __ push(cache_entry); 3257 __ Push(cache_entry, a2, a3);
3263 GenerateCallCFunction(masm, scratch0); 3258 GenerateCallCFunction(masm, scratch0);
3264 __ GetCFunctionDoubleResult(f4); 3259 __ GetCFunctionDoubleResult(f4);
3265 3260
3266 // Try to update the cache. If we cannot allocate a 3261 // Try to update the cache. If we cannot allocate a
3267 // heap number, we return the result without updating. 3262 // heap number, we return the result without updating.
3268 __ pop(cache_entry); 3263 __ Pop(cache_entry, a2, a3);
3269 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3264 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3270 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); 3265 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3271 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); 3266 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3272 3267
3273 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); 3268 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3274 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); 3269 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3275 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); 3270 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3276 3271
3277 __ mov(v0, cache_entry); 3272 __ mov(v0, cache_entry);
3278 __ Ret(); 3273 __ Ret();
3279 3274
3280 __ bind(&invalid_cache); 3275 __ bind(&invalid_cache);
3281 // The cache is invalid. Call runtime which will recreate the 3276 // The cache is invalid. Call runtime which will recreate the
3282 // cache. 3277 // cache.
3283 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3278 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3284 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); 3279 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3285 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); 3280 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3286 __ EnterInternalFrame(); 3281 {
3287 __ push(a0); 3282 FrameScope scope(masm, StackFrame::INTERNAL);
3288 __ CallRuntime(RuntimeFunction(), 1); 3283 __ push(a0);
3289 __ LeaveInternalFrame(); 3284 __ CallRuntime(RuntimeFunction(), 1);
3285 }
3290 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset)); 3286 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3291 __ Ret(); 3287 __ Ret();
3292 3288
3293 __ bind(&skip_cache); 3289 __ bind(&skip_cache);
3294 // Call C function to calculate the result and answer directly 3290 // Call C function to calculate the result and answer directly
3295 // without updating the cache. 3291 // without updating the cache.
3296 GenerateCallCFunction(masm, scratch0); 3292 GenerateCallCFunction(masm, scratch0);
3297 __ GetCFunctionDoubleResult(f4); 3293 __ GetCFunctionDoubleResult(f4);
3298 __ bind(&no_update); 3294 __ bind(&no_update);
3299 3295
3300 // We return the value in f4 without adding it to the cache, but 3296 // We return the value in f4 without adding it to the cache, but
3301 // we cause a scavenging GC so that future allocations will succeed. 3297 // we cause a scavenging GC so that future allocations will succeed.
3302 __ EnterInternalFrame(); 3298 {
3299 FrameScope scope(masm, StackFrame::INTERNAL);
3303 3300
3304 // Allocate an aligned object larger than a HeapNumber. 3301 // Allocate an aligned object larger than a HeapNumber.
3305 ASSERT(4 * kPointerSize >= HeapNumber::kSize); 3302 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3306 __ li(scratch0, Operand(4 * kPointerSize)); 3303 __ li(scratch0, Operand(4 * kPointerSize));
3307 __ push(scratch0); 3304 __ push(scratch0);
3308 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 3305 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3309 __ LeaveInternalFrame(); 3306 }
3310 __ Ret(); 3307 __ Ret();
3311 } 3308 }
3312 } 3309 }
3313 3310
3314 3311
3315 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, 3312 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3316 Register scratch) { 3313 Register scratch) {
3317 __ push(ra); 3314 __ push(ra);
3318 __ PrepareCallCFunction(2, scratch); 3315 __ PrepareCallCFunction(2, scratch);
3319 if (IsMipsSoftFloatABI) { 3316 if (IsMipsSoftFloatABI) {
3320 __ Move(v0, v1, f4); 3317 __ Move(a0, a1, f4);
3321 } else { 3318 } else {
3322 __ mov_d(f12, f4); 3319 __ mov_d(f12, f4);
3323 } 3320 }
3321 AllowExternalCallThatCantCauseGC scope(masm);
3324 switch (type_) { 3322 switch (type_) {
3325 case TranscendentalCache::SIN: 3323 case TranscendentalCache::SIN:
3326 __ CallCFunction( 3324 __ CallCFunction(
3327 ExternalReference::math_sin_double_function(masm->isolate()), 2); 3325 ExternalReference::math_sin_double_function(masm->isolate()),
3326 0, 1);
3328 break; 3327 break;
3329 case TranscendentalCache::COS: 3328 case TranscendentalCache::COS:
3330 __ CallCFunction( 3329 __ CallCFunction(
3331 ExternalReference::math_cos_double_function(masm->isolate()), 2); 3330 ExternalReference::math_cos_double_function(masm->isolate()),
3331 0, 1);
3332 break; 3332 break;
3333 case TranscendentalCache::LOG: 3333 case TranscendentalCache::LOG:
3334 __ CallCFunction( 3334 __ CallCFunction(
3335 ExternalReference::math_log_double_function(masm->isolate()), 2); 3335 ExternalReference::math_log_double_function(masm->isolate()),
3336 0, 1);
3336 break; 3337 break;
3337 default: 3338 default:
3338 UNIMPLEMENTED(); 3339 UNIMPLEMENTED();
3339 break; 3340 break;
3340 } 3341 }
3341 __ pop(ra); 3342 __ pop(ra);
3342 } 3343 }
3343 3344
3344 3345
3345 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 3346 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
(...skipping 62 matching lines...)
3408 // The base is in a double register and the exponent is 3409 // The base is in a double register and the exponent is
3409 // an untagged smi. Allocate a heap number and call a 3410 // an untagged smi. Allocate a heap number and call a
3410 // C function for integer exponents. The register containing 3411 // C function for integer exponents. The register containing
3411 // the heap number is callee-saved. 3412 // the heap number is callee-saved.
3412 __ AllocateHeapNumber(heapnumber, 3413 __ AllocateHeapNumber(heapnumber,
3413 scratch, 3414 scratch,
3414 scratch2, 3415 scratch2,
3415 heapnumbermap, 3416 heapnumbermap,
3416 &call_runtime); 3417 &call_runtime);
3417 __ push(ra); 3418 __ push(ra);
3418 __ PrepareCallCFunction(3, scratch); 3419 __ PrepareCallCFunction(1, 1, scratch);
3419 __ SetCallCDoubleArguments(double_base, exponent); 3420 __ SetCallCDoubleArguments(double_base, exponent);
3420 __ CallCFunction( 3421 {
3421 ExternalReference::power_double_int_function(masm->isolate()), 3); 3422 AllowExternalCallThatCantCauseGC scope(masm);
3422 __ pop(ra); 3423 __ CallCFunction(
3423 __ GetCFunctionDoubleResult(double_result); 3424 ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
3425 __ pop(ra);
3426 __ GetCFunctionDoubleResult(double_result);
3427 }
3424 __ sdc1(double_result, 3428 __ sdc1(double_result,
3425 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 3429 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3426 __ mov(v0, heapnumber); 3430 __ mov(v0, heapnumber);
3427 __ DropAndRet(2 * kPointerSize); 3431 __ DropAndRet(2 * kPointerSize);
3428 3432
3429 __ bind(&exponent_not_smi); 3433 __ bind(&exponent_not_smi);
3430 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); 3434 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3431 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); 3435 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3432 // Exponent is a heapnumber. Load it into double register. 3436 // Exponent is a heapnumber. Load it into double register.
3433 __ ldc1(double_exponent, 3437 __ ldc1(double_exponent,
3434 FieldMemOperand(exponent, HeapNumber::kValueOffset)); 3438 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3435 3439
3436 // The base and the exponent are in double registers. 3440 // The base and the exponent are in double registers.
3437 // Allocate a heap number and call a C function for 3441 // Allocate a heap number and call a C function for
3438 // double exponents. The register containing 3442 // double exponents. The register containing
3439 // the heap number is callee-saved. 3443 // the heap number is callee-saved.
3440 __ AllocateHeapNumber(heapnumber, 3444 __ AllocateHeapNumber(heapnumber,
3441 scratch, 3445 scratch,
3442 scratch2, 3446 scratch2,
3443 heapnumbermap, 3447 heapnumbermap,
3444 &call_runtime); 3448 &call_runtime);
3445 __ push(ra); 3449 __ push(ra);
3446 __ PrepareCallCFunction(4, scratch); 3450 __ PrepareCallCFunction(0, 2, scratch);
3447 // ABI (o32) for func(double a, double b): a in f12, b in f14. 3451 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3448 ASSERT(double_base.is(f12)); 3452 ASSERT(double_base.is(f12));
3449 ASSERT(double_exponent.is(f14)); 3453 ASSERT(double_exponent.is(f14));
3450 __ SetCallCDoubleArguments(double_base, double_exponent); 3454 __ SetCallCDoubleArguments(double_base, double_exponent);
3451 __ CallCFunction( 3455 {
3452 ExternalReference::power_double_double_function(masm->isolate()), 4); 3456 AllowExternalCallThatCantCauseGC scope(masm);
3453 __ pop(ra); 3457 __ CallCFunction(
3454 __ GetCFunctionDoubleResult(double_result); 3458 ExternalReference::power_double_double_function(masm->isolate()),
3459 0,
3460 2);
3461 __ pop(ra);
3462 __ GetCFunctionDoubleResult(double_result);
3463 }
3455 __ sdc1(double_result, 3464 __ sdc1(double_result,
3456 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 3465 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3457 __ mov(v0, heapnumber); 3466 __ mov(v0, heapnumber);
3458 __ DropAndRet(2 * kPointerSize); 3467 __ DropAndRet(2 * kPointerSize);
3459 } 3468 }
3460 3469
3461 __ bind(&call_runtime); 3470 __ bind(&call_runtime);
3462 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 3471 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3463 } 3472 }
3464 3473
3465 3474
3466 bool CEntryStub::NeedsImmovableCode() { 3475 bool CEntryStub::NeedsImmovableCode() {
3467 return true; 3476 return true;
3468 } 3477 }
3469 3478
3470 3479
3480 bool CEntryStub::IsPregenerated() {
3481 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3482 result_size_ == 1;
3483 }
3484
3485
3486 void CodeStub::GenerateStubsAheadOfTime() {
3487 }
3488
3489
3490 void CodeStub::GenerateFPStubs() {
3491 CEntryStub save_doubles(1);
3492 save_doubles.SaveDoubles();
3493 Handle<Code> code = save_doubles.GetCode();
3494 code->GetIsolate()->set_fp_stubs_generated(true);
3495 }
3496
3497
3471 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { 3498 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3472 __ Throw(v0); 3499 __ Throw(v0);
3473 } 3500 }
3474 3501
3475 3502
3476 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, 3503 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3477 UncatchableExceptionType type) { 3504 UncatchableExceptionType type) {
3478 __ ThrowUncatchable(type, v0); 3505 __ ThrowUncatchable(type, v0);
3479 } 3506 }
3480 3507
3481 3508
3482 void CEntryStub::GenerateCore(MacroAssembler* masm, 3509 void CEntryStub::GenerateCore(MacroAssembler* masm,
3483 Label* throw_normal_exception, 3510 Label* throw_normal_exception,
3484 Label* throw_termination_exception, 3511 Label* throw_termination_exception,
3485 Label* throw_out_of_memory_exception, 3512 Label* throw_out_of_memory_exception,
3486 bool do_gc, 3513 bool do_gc,
3487 bool always_allocate) { 3514 bool always_allocate) {
3488 // v0: result parameter for PerformGC, if any 3515 // v0: result parameter for PerformGC, if any
3489 // s0: number of arguments including receiver (C callee-saved) 3516 // s0: number of arguments including receiver (C callee-saved)
3490 // s1: pointer to the first argument (C callee-saved) 3517 // s1: pointer to the first argument (C callee-saved)
3491 // s2: pointer to builtin function (C callee-saved) 3518 // s2: pointer to builtin function (C callee-saved)
3492 3519
3493 if (do_gc) { 3520 if (do_gc) {
3494 // Move result passed in v0 into a0 to call PerformGC. 3521 // Move result passed in v0 into a0 to call PerformGC.
3495 __ mov(a0, v0); 3522 __ mov(a0, v0);
3496 __ PrepareCallCFunction(1, a1); 3523 __ PrepareCallCFunction(1, 0, a1);
3497 __ CallCFunction( 3524 __ CallCFunction(
3498 ExternalReference::perform_gc_function(masm->isolate()), 1); 3525 ExternalReference::perform_gc_function(masm->isolate()),
3526 1, 0);
3499 } 3527 }
3500 3528
3501 ExternalReference scope_depth = 3529 ExternalReference scope_depth =
3502 ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); 3530 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3503 if (always_allocate) { 3531 if (always_allocate) {
3504 __ li(a0, Operand(scope_depth)); 3532 __ li(a0, Operand(scope_depth));
3505 __ lw(a1, MemOperand(a0)); 3533 __ lw(a1, MemOperand(a0));
3506 __ Addu(a1, a1, Operand(1)); 3534 __ Addu(a1, a1, Operand(1));
3507 __ sw(a1, MemOperand(a0)); 3535 __ sw(a1, MemOperand(a0));
3508 } 3536 }
(...skipping 112 matching lines...)
3621 // instead of a proper result. The builtin entry handles 3649 // instead of a proper result. The builtin entry handles
3622 // this by performing a garbage collection and retrying the 3650 // this by performing a garbage collection and retrying the
3623 // builtin once. 3651 // builtin once.
3624 3652
3625 // Compute the argv pointer in a callee-saved register. 3653 // Compute the argv pointer in a callee-saved register.
3626 __ sll(s1, a0, kPointerSizeLog2); 3654 __ sll(s1, a0, kPointerSizeLog2);
3627 __ Addu(s1, sp, s1); 3655 __ Addu(s1, sp, s1);
3628 __ Subu(s1, s1, Operand(kPointerSize)); 3656 __ Subu(s1, s1, Operand(kPointerSize));
3629 3657
3630 // Enter the exit frame that transitions from JavaScript to C++. 3658 // Enter the exit frame that transitions from JavaScript to C++.
3659 FrameScope scope(masm, StackFrame::MANUAL);
3631 __ EnterExitFrame(save_doubles_); 3660 __ EnterExitFrame(save_doubles_);
3632 3661
3633 // Setup argc and the builtin function in callee-saved registers. 3662 // Setup argc and the builtin function in callee-saved registers.
3634 __ mov(s0, a0); 3663 __ mov(s0, a0);
3635 __ mov(s2, a1); 3664 __ mov(s2, a1);
3636 3665
3637 // s0: number of arguments (C callee-saved) 3666 // s0: number of arguments (C callee-saved)
3638 // s1: pointer to first argument (C callee-saved) 3667 // s1: pointer to first argument (C callee-saved)
3639 // s2: pointer to builtin function (C callee-saved) 3668 // s2: pointer to builtin function (C callee-saved)
3640 3669
(...skipping 51 matching lines...)
3692 // 4 args slots 3721 // 4 args slots
3693 // args 3722 // args
3694 3723
3695 // Save callee saved registers on the stack. 3724 // Save callee saved registers on the stack.
3696 __ MultiPush(kCalleeSaved | ra.bit()); 3725 __ MultiPush(kCalleeSaved | ra.bit());
3697 3726
3698 if (CpuFeatures::IsSupported(FPU)) { 3727 if (CpuFeatures::IsSupported(FPU)) {
3699 CpuFeatures::Scope scope(FPU); 3728 CpuFeatures::Scope scope(FPU);
3700 // Save callee-saved FPU registers. 3729 // Save callee-saved FPU registers.
3701 __ MultiPushFPU(kCalleeSavedFPU); 3730 __ MultiPushFPU(kCalleeSavedFPU);
3731 // Set up the reserved register for 0.0.
3732 __ Move(kDoubleRegZero, 0.0);
3702 } 3733 }
3703 3734
3735
3704 // Load argv in s0 register. 3736 // Load argv in s0 register.
3705 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; 3737 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3706 if (CpuFeatures::IsSupported(FPU)) { 3738 if (CpuFeatures::IsSupported(FPU)) {
3707 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; 3739 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
3708 } 3740 }
3709 3741
3710 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); 3742 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
3711 3743
3712 // We build an EntryFrame. 3744 // We build an EntryFrame.
3713 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. 3745 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
(...skipping 136 matching lines...)
3850 // Return. 3882 // Return.
3851 __ Jump(ra); 3883 __ Jump(ra);
3852 } 3884 }
3853 3885
3854 3886
3855 // Uses registers a0 to t0. 3887 // Uses registers a0 to t0.
3856 // Expected input (depending on whether args are in registers or on the stack): 3888 // Expected input (depending on whether args are in registers or on the stack):
3857 // * object: a0 or at sp + 1 * kPointerSize. 3889 // * object: a0 or at sp + 1 * kPointerSize.
3858 // * function: a1 or at sp. 3890 // * function: a1 or at sp.
3859 // 3891 //
3860 // Inlined call site patching is a crankshaft-specific feature that is not 3892 // An inlined call site may have been generated before calling this stub.
3861 // implemented on MIPS. 3893 // In this case the offset to the inline site to patch is passed on the stack,
3894 // in the safepoint slot for register t0.
3862 void InstanceofStub::Generate(MacroAssembler* masm) { 3895 void InstanceofStub::Generate(MacroAssembler* masm) {
3863 // This is a crankshaft-specific feature that has not been implemented yet.
3864 ASSERT(!HasCallSiteInlineCheck());
3865 // Call site inlining and patching implies arguments in registers. 3896 // Call site inlining and patching implies arguments in registers.
3866 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); 3897 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3867 // ReturnTrueFalse is only implemented for inlined call sites. 3898 // ReturnTrueFalse is only implemented for inlined call sites.
3868 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); 3899 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3869 3900
3870 // Fixed register usage throughout the stub: 3901 // Fixed register usage throughout the stub:
3871 const Register object = a0; // Object (lhs). 3902 const Register object = a0; // Object (lhs).
3872 Register map = a3; // Map of the object. 3903 Register map = a3; // Map of the object.
3873 const Register function = a1; // Function (rhs). 3904 const Register function = a1; // Function (rhs).
3874 const Register prototype = t0; // Prototype of the function. 3905 const Register prototype = t0; // Prototype of the function.
3875 const Register inline_site = t5; 3906 const Register inline_site = t5;
3876 const Register scratch = a2; 3907 const Register scratch = a2;
3877 3908
3909 const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
3910
3878 Label slow, loop, is_instance, is_not_instance, not_js_object; 3911 Label slow, loop, is_instance, is_not_instance, not_js_object;
3879 3912
3880 if (!HasArgsInRegisters()) { 3913 if (!HasArgsInRegisters()) {
3881 __ lw(object, MemOperand(sp, 1 * kPointerSize)); 3914 __ lw(object, MemOperand(sp, 1 * kPointerSize));
3882 __ lw(function, MemOperand(sp, 0)); 3915 __ lw(function, MemOperand(sp, 0));
3883 } 3916 }
3884 3917
3885 // Check that the left hand is a JS object and load map. 3918 // Check that the left hand is a JS object and load map.
3886 __ JumpIfSmi(object, &not_js_object); 3919 __ JumpIfSmi(object, &not_js_object);
3887 __ IsObjectJSObjectType(object, map, scratch, &not_js_object); 3920 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
3888 3921
3889 // If there is a call site cache don't look in the global cache, but do the 3922 // If there is a call site cache don't look in the global cache, but do the
3890 // real lookup and update the call site cache. 3923 // real lookup and update the call site cache.
3891 if (!HasCallSiteInlineCheck()) { 3924 if (!HasCallSiteInlineCheck()) {
3892 Label miss; 3925 Label miss;
3893 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); 3926 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
3894 __ Branch(&miss, ne, function, Operand(t1)); 3927 __ Branch(&miss, ne, function, Operand(at));
3895 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); 3928 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
3896 __ Branch(&miss, ne, map, Operand(t1)); 3929 __ Branch(&miss, ne, map, Operand(at));
3897 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); 3930 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3898 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 3931 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3899 3932
3900 __ bind(&miss); 3933 __ bind(&miss);
3901 } 3934 }
3902 3935
3903 // Get the prototype of the function. 3936 // Get the prototype of the function.
3904 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); 3937 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
3905 3938
3906 // Check that the function prototype is a JS object. 3939 // Check that the function prototype is a JS object.
3907 __ JumpIfSmi(prototype, &slow); 3940 __ JumpIfSmi(prototype, &slow);
3908 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); 3941 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
3909 3942
3910 // Update the global instanceof or call site inlined cache with the current 3943 // Update the global instanceof or call site inlined cache with the current
3911 // map and function. The cached answer will be set when it is known below. 3944 // map and function. The cached answer will be set when it is known below.
3912 if (!HasCallSiteInlineCheck()) { 3945 if (!HasCallSiteInlineCheck()) {
3913 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); 3946 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3914 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); 3947 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3915 } else { 3948 } else {
3916 UNIMPLEMENTED_MIPS(); 3949 ASSERT(HasArgsInRegisters());
3950 // Patch the (relocated) inlined map check.
3951
3952 // The offset was stored in t0 safepoint slot.
3953 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
3954 __ LoadFromSafepointRegisterSlot(scratch, t0);
3955 __ Subu(inline_site, ra, scratch);
3956 // Patch the relocated value to map.
3957 __ PatchRelocatedValue(inline_site, scratch, map);
3917 } 3958 }
3918 3959
3919 // Register mapping: a3 is object map and t0 is function prototype. 3960 // Register mapping: a3 is object map and t0 is function prototype.
3920 // Get prototype of object into a2. 3961 // Get prototype of object into a2.
3921 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); 3962 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3922 3963
3923 // We don't need map any more. Use it as a scratch register. 3964 // We don't need map any more. Use it as a scratch register.
3924 Register scratch2 = map; 3965 Register scratch2 = map;
3925 map = no_reg; 3966 map = no_reg;
3926 3967
3927 // Loop through the prototype chain looking for the function prototype. 3968 // Loop through the prototype chain looking for the function prototype.
3928 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); 3969 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
3929 __ bind(&loop); 3970 __ bind(&loop);
3930 __ Branch(&is_instance, eq, scratch, Operand(prototype)); 3971 __ Branch(&is_instance, eq, scratch, Operand(prototype));
3931 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); 3972 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
3932 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); 3973 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
3933 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); 3974 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
3934 __ Branch(&loop); 3975 __ Branch(&loop);
3935 3976
3936 __ bind(&is_instance); 3977 __ bind(&is_instance);
3937 ASSERT(Smi::FromInt(0) == 0); 3978 ASSERT(Smi::FromInt(0) == 0);
3938 if (!HasCallSiteInlineCheck()) { 3979 if (!HasCallSiteInlineCheck()) {
3939 __ mov(v0, zero_reg); 3980 __ mov(v0, zero_reg);
3940 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); 3981 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3941 } else { 3982 } else {
3942 UNIMPLEMENTED_MIPS(); 3983 // Patch the call site to return true.
3984 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
3985 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
3986 // Get the boolean result location in scratch and patch it.
3987 __ PatchRelocatedValue(inline_site, scratch, v0);
3988
3989 if (!ReturnTrueFalseObject()) {
3990 ASSERT_EQ(Smi::FromInt(0), 0);
3991 __ mov(v0, zero_reg);
3992 }
3943 } 3993 }
3944 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 3994 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3945 3995
3946 __ bind(&is_not_instance); 3996 __ bind(&is_not_instance);
3947 if (!HasCallSiteInlineCheck()) { 3997 if (!HasCallSiteInlineCheck()) {
3948 __ li(v0, Operand(Smi::FromInt(1))); 3998 __ li(v0, Operand(Smi::FromInt(1)));
3949 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); 3999 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3950 } else { 4000 } else {
3951 UNIMPLEMENTED_MIPS(); 4001 // Patch the call site to return false.
4002 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4003 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4004 // Get the boolean result location in scratch and patch it.
4005 __ PatchRelocatedValue(inline_site, scratch, v0);
4006
4007 if (!ReturnTrueFalseObject()) {
4008 __ li(v0, Operand(Smi::FromInt(1)));
4009 }
3952 } 4010 }
4011
3953 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 4012 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3954 4013
3955 Label object_not_null, object_not_null_or_smi; 4014 Label object_not_null, object_not_null_or_smi;
3956 __ bind(&not_js_object); 4015 __ bind(&not_js_object);
3957 // Before null, smi and string value checks, check that the rhs is a function 4016 // Before null, smi and string value checks, check that the rhs is a function
3958 // as for a non-function rhs an exception needs to be thrown. 4017 // as for a non-function rhs an exception needs to be thrown.
3959 __ JumpIfSmi(function, &slow); 4018 __ JumpIfSmi(function, &slow);
3960 __ GetObjectType(function, scratch2, scratch); 4019 __ GetObjectType(function, scratch2, scratch);
3961 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); 4020 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
3962 4021
(...skipping 16 matching lines...)
3979 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 4038 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3980 4039
3981 // Slow-case. Tail call builtin. 4040 // Slow-case. Tail call builtin.
3982 __ bind(&slow); 4041 __ bind(&slow);
3983 if (!ReturnTrueFalseObject()) { 4042 if (!ReturnTrueFalseObject()) {
3984 if (HasArgsInRegisters()) { 4043 if (HasArgsInRegisters()) {
3985 __ Push(a0, a1); 4044 __ Push(a0, a1);
3986 } 4045 }
3987 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 4046 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3988 } else { 4047 } else {
3989 __ EnterInternalFrame(); 4048 {
3990 __ Push(a0, a1); 4049 FrameScope scope(masm, StackFrame::INTERNAL);
3991 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); 4050 __ Push(a0, a1);
3992 __ LeaveInternalFrame(); 4051 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4052 }
3993 __ mov(a0, v0); 4053 __ mov(a0, v0);
3994 __ LoadRoot(v0, Heap::kTrueValueRootIndex); 4054 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
3995 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg)); 4055 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
3996 __ LoadRoot(v0, Heap::kFalseValueRootIndex); 4056 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
3997 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); 4057 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3998 } 4058 }
3999 } 4059 }
4000 4060
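A minimal C++ sketch (not part of the patch) of the prototype-chain walk the stub above performs: it compares each prototype on the object's chain against the function's prototype, and either caches the answer in the instanceof cache roots or, for an inlined call site, patches the relocated boolean at inline_site + kDeltaToLoadBoolResult. The struct below is a hypothetical layout, not V8's real object model.

#include <cstddef>

struct HeapObject {
  HeapObject* prototype;  // hypothetical: the prototype recorded on the map
};

// Returns true if 'prototype' occurs on 'object's prototype chain; the chain
// is terminated by null, matching the Heap::kNullValueRootIndex check above.
static bool IsInstance(const HeapObject* object, const HeapObject* prototype) {
  for (const HeapObject* p = object->prototype; p != NULL; p = p->prototype) {
    if (p == prototype) return true;
  }
  return false;
}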
4001 4061
4002 Register InstanceofStub::left() { return a0; } 4062 Register InstanceofStub::left() { return a0; }
(...skipping 651 matching lines...)
4654 __ addu(a0, a0, a2); 4714 __ addu(a0, a0, a2);
4655 __ sw(a0, MemOperand(sp, 2 * kPointerSize)); 4715 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4656 4716
4657 // Argument 5: static offsets vector buffer. 4717 // Argument 5: static offsets vector buffer.
4658 __ li(a0, Operand( 4718 __ li(a0, Operand(
4659 ExternalReference::address_of_static_offsets_vector(masm->isolate()))); 4719 ExternalReference::address_of_static_offsets_vector(masm->isolate())));
4660 __ sw(a0, MemOperand(sp, 1 * kPointerSize)); 4720 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4661 4721
4662 // For arguments 4 and 3 get string length, calculate start of string data 4722 // For arguments 4 and 3 get string length, calculate start of string data
4663 // and calculate the shift of the index (0 for ASCII and 1 for two byte). 4723 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
4664 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); 4724 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
4665 __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4666 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. 4725 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
4667 // Load the length from the original subject string from the previous stack 4726 // Load the length from the original subject string from the previous stack
4668 // frame. Therefore we have to use fp, which points exactly to two pointer 4727 // frame. Therefore we have to use fp, which points exactly to two pointer
4669 // sizes below the previous sp. (Because creating a new stack frame pushes 4728 // sizes below the previous sp. (Because creating a new stack frame pushes
4670 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) 4729 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4671 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); 4730 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4672 // If slice offset is not 0, load the length from the original sliced string. 4731 // If slice offset is not 0, load the length from the original sliced string.
4673 // Argument 4, a3: End of string data 4732 // Argument 4, a3: End of string data
4674 // Argument 3, a2: Start of string data 4733 // Argument 3, a2: Start of string data
4675 // Prepare start and end index of the input. 4734 // Prepare start and end index of the input.
(...skipping 213 matching lines...)
4889 __ bind(&done); 4948 __ bind(&done);
4890 __ Addu(sp, sp, Operand(3 * kPointerSize)); 4949 __ Addu(sp, sp, Operand(3 * kPointerSize));
4891 __ Ret(); 4950 __ Ret();
4892 4951
4893 __ bind(&slowcase); 4952 __ bind(&slowcase);
4894 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); 4953 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4895 } 4954 }
4896 4955
4897 4956
4898 void CallFunctionStub::Generate(MacroAssembler* masm) { 4957 void CallFunctionStub::Generate(MacroAssembler* masm) {
4899 Label slow; 4958 Label slow, non_function;
4900 4959
4901 // The receiver might implicitly be the global object. This is 4960 // The receiver might implicitly be the global object. This is
4902 // indicated by passing the hole as the receiver to the call 4961 // indicated by passing the hole as the receiver to the call
4903 // function stub. 4962 // function stub.
4904 if (ReceiverMightBeImplicit()) { 4963 if (ReceiverMightBeImplicit()) {
4905 Label call; 4964 Label call;
4906 // Get the receiver from the stack. 4965 // Get the receiver from the stack.
4907 // function, receiver [, arguments] 4966 // function, receiver [, arguments]
4908 __ lw(t0, MemOperand(sp, argc_ * kPointerSize)); 4967 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
4909 // Call as function is indicated with the hole. 4968 // Call as function is indicated with the hole.
4910 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 4969 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4911 __ Branch(&call, ne, t0, Operand(at)); 4970 __ Branch(&call, ne, t0, Operand(at));
4912 // Patch the receiver on the stack with the global receiver object. 4971 // Patch the receiver on the stack with the global receiver object.
4913 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 4972 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4914 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset)); 4973 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
4915 __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); 4974 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4916 __ bind(&call); 4975 __ bind(&call);
4917 } 4976 }
4918 4977
4919 // Get the function to call from the stack. 4978 // Get the function to call from the stack.
4920 // function, receiver [, arguments] 4979 // function, receiver [, arguments]
4921 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize)); 4980 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
4922 4981
4923 // Check that the function is really a JavaScript function. 4982 // Check that the function is really a JavaScript function.
4924 // a1: pushed function (to be verified) 4983 // a1: pushed function (to be verified)
4925 __ JumpIfSmi(a1, &slow); 4984 __ JumpIfSmi(a1, &non_function);
4926 // Get the map of the function object. 4985 // Get the map of the function object.
4927 __ GetObjectType(a1, a2, a2); 4986 __ GetObjectType(a1, a2, a2);
4928 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); 4987 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
4929 4988
4930 // Fast-case: Invoke the function now. 4989 // Fast-case: Invoke the function now.
4931 // a1: pushed function 4990 // a1: pushed function
4932 ParameterCount actual(argc_); 4991 ParameterCount actual(argc_);
4933 4992
4934 if (ReceiverMightBeImplicit()) { 4993 if (ReceiverMightBeImplicit()) {
4935 Label call_as_function; 4994 Label call_as_function;
4936 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 4995 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4937 __ Branch(&call_as_function, eq, t0, Operand(at)); 4996 __ Branch(&call_as_function, eq, t0, Operand(at));
4938 __ InvokeFunction(a1, 4997 __ InvokeFunction(a1,
4939 actual, 4998 actual,
4940 JUMP_FUNCTION, 4999 JUMP_FUNCTION,
4941 NullCallWrapper(), 5000 NullCallWrapper(),
4942 CALL_AS_METHOD); 5001 CALL_AS_METHOD);
4943 __ bind(&call_as_function); 5002 __ bind(&call_as_function);
4944 } 5003 }
4945 __ InvokeFunction(a1, 5004 __ InvokeFunction(a1,
4946 actual, 5005 actual,
4947 JUMP_FUNCTION, 5006 JUMP_FUNCTION,
4948 NullCallWrapper(), 5007 NullCallWrapper(),
4949 CALL_AS_FUNCTION); 5008 CALL_AS_FUNCTION);
4950 5009
4951 // Slow-case: Non-function called. 5010 // Slow-case: Non-function called.
4952 __ bind(&slow); 5011 __ bind(&slow);
5012 // Check for function proxy.
5013 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5014 __ push(a1); // Put proxy as additional argument.
5015 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5016 __ li(a2, Operand(0, RelocInfo::NONE));
5017 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5018 __ SetCallKind(t1, CALL_AS_FUNCTION);
5019 {
5020 Handle<Code> adaptor =
5021 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5022 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5023 }
5024
4953 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 5025 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4954 // of the original receiver from the call site). 5026 // of the original receiver from the call site).
5027 __ bind(&non_function);
4955 __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); 5028 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4956 __ li(a0, Operand(argc_)); // Setup the number of arguments. 5029 __ li(a0, Operand(argc_)); // Setup the number of arguments.
4957 __ mov(a2, zero_reg); 5030 __ mov(a2, zero_reg);
4958 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); 5031 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
4959 __ SetCallKind(t1, CALL_AS_METHOD); 5032 __ SetCallKind(t1, CALL_AS_METHOD);
4960 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), 5033 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4961 RelocInfo::CODE_TARGET); 5034 RelocInfo::CODE_TARGET);
4962 } 5035 }
4963 5036
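A rough C++ sketch (illustrative names only, not V8 API) of the implicit-receiver handling in CallFunctionStub above: when the receiver slot on the stack holds the hole, the call site passed no explicit receiver, so the stub patches in the global receiver and invokes the callee CALL_AS_FUNCTION; otherwise it invokes CALL_AS_METHOD.

#include <cstddef>
#include <vector>

struct Value { bool is_the_hole; };

// Returns true if the call should proceed as CALL_AS_FUNCTION (implicit
// receiver); patches the receiver slot with the global receiver in that case.
static bool FixUpImplicitReceiver(std::vector<Value>* stack,
                                  size_t receiver_slot,
                                  const Value& global_receiver) {
  if ((*stack)[receiver_slot].is_the_hole) {
    (*stack)[receiver_slot] = global_receiver;  // patch receiver on the stack
    return true;   // invoke with CALL_AS_FUNCTION
  }
  return false;    // explicit receiver: invoke with CALL_AS_METHOD
}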
4964 5037
(...skipping 1491 matching lines...)
6456 // stub if NaN is involved or FPU is unsupported. 6529 // stub if NaN is involved or FPU is unsupported.
6457 if (CpuFeatures::IsSupported(FPU)) { 6530 if (CpuFeatures::IsSupported(FPU)) {
6458 CpuFeatures::Scope scope(FPU); 6531 CpuFeatures::Scope scope(FPU);
6459 6532
6460 // Load left and right operand. 6533 // Load left and right operand.
6461 __ Subu(a2, a1, Operand(kHeapObjectTag)); 6534 __ Subu(a2, a1, Operand(kHeapObjectTag));
6462 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); 6535 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6463 __ Subu(a2, a0, Operand(kHeapObjectTag)); 6536 __ Subu(a2, a0, Operand(kHeapObjectTag));
6464 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); 6537 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6465 6538
6466 Label fpu_eq, fpu_lt, fpu_gt; 6539 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6467 // Compare operands (test if unordered). 6540 Label fpu_eq, fpu_lt;
6468 __ c(UN, D, f0, f2); 6541 // Test if equal, and also handle the unordered/NaN case.
6469 // Don't base result on status bits when a NaN is involved. 6542 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6470 __ bc1t(&unordered);
6471 __ nop();
6472 6543
6473 // Test if equal. 6544 // Test if less (unordered case is already handled).
6474 __ c(EQ, D, f0, f2); 6545 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6475 __ bc1t(&fpu_eq);
6476 __ nop();
6477 6546
6478 // Test if unordered or less (unordered case is already handled). 6547 // Otherwise it's greater, so just fall thru, and return.
6479 __ c(ULT, D, f0, f2); 6548 __ Ret(USE_DELAY_SLOT);
6480 __ bc1t(&fpu_lt); 6549 __ li(v0, Operand(GREATER)); // In delay slot.
6481 __ nop();
6482 6550
6483 // Otherwise it's greater.
6484 __ bc1f(&fpu_gt);
6485 __ nop();
6486
6487 // Return a result of -1, 0, or 1.
6488 __ bind(&fpu_eq); 6551 __ bind(&fpu_eq);
6489 __ li(v0, Operand(EQUAL)); 6552 __ Ret(USE_DELAY_SLOT);
6490 __ Ret(); 6553 __ li(v0, Operand(EQUAL)); // In delay slot.
6491 6554
6492 __ bind(&fpu_lt); 6555 __ bind(&fpu_lt);
6493 __ li(v0, Operand(LESS)); 6556 __ Ret(USE_DELAY_SLOT);
6494 __ Ret(); 6557 __ li(v0, Operand(LESS)); // In delay slot.
6495
6496 __ bind(&fpu_gt);
6497 __ li(v0, Operand(GREATER));
6498 __ Ret();
6499 6558
6500 __ bind(&unordered); 6559 __ bind(&unordered);
6501 } 6560 }
6502 6561
6503 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); 6562 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6504 __ bind(&generic_stub); 6563 __ bind(&generic_stub);
6505 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); 6564 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6506 6565
6507 __ bind(&miss); 6566 __ bind(&miss);
6508 GenerateMiss(masm); 6567 GenerateMiss(masm);
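The floating-point path above produces a three-way result in v0 and defers the unordered (NaN) case to the generic CompareStub. A rough C++ equivalent, assuming the usual LESS/EQUAL/GREATER values of -1/0/1 (an assumption for illustration, not taken from this patch):

#include <cmath>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };  // assumed values

// Three-way compare of two doubles; returns false in the unordered (NaN)
// case, which the stub hands off to the generic CompareStub instead.
static bool ThreeWayCompare(double lhs, double rhs, CompareResult* result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return false;  // unordered
  if (lhs == rhs) {
    *result = EQUAL;
  } else if (lhs < rhs) {
    *result = LESS;
  } else {
    *result = GREATER;
  }
  return true;
}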
(...skipping 130 matching lines...)
6639 } 6698 }
6640 6699
6641 6700
6642 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { 6701 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6643 __ Push(a1, a0); 6702 __ Push(a1, a0);
6644 __ push(ra); 6703 __ push(ra);
6645 6704
6646 // Call the runtime system in a fresh internal frame. 6705 // Call the runtime system in a fresh internal frame.
6647 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), 6706 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6648 masm->isolate()); 6707 masm->isolate());
6649 __ EnterInternalFrame(); 6708 {
6650 __ Push(a1, a0); 6709 FrameScope scope(masm, StackFrame::INTERNAL);
6651 __ li(t0, Operand(Smi::FromInt(op_))); 6710 __ Push(a1, a0);
6652 __ push(t0); 6711 __ li(t0, Operand(Smi::FromInt(op_)));
6653 __ CallExternalReference(miss, 3); 6712 __ push(t0);
6654 __ LeaveInternalFrame(); 6713 __ CallExternalReference(miss, 3);
6714 }
6655 // Compute the entry point of the rewritten stub. 6715 // Compute the entry point of the rewritten stub.
6656 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); 6716 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6657 // Restore registers. 6717 // Restore registers.
6658 __ pop(ra); 6718 __ pop(ra);
6659 __ pop(a0); 6719 __ pop(a0);
6660 __ pop(a1); 6720 __ pop(a1);
6661 __ Jump(a2); 6721 __ Jump(a2);
6662 } 6722 }
6663 6723
6664 6724
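The FrameScope used above is an RAII replacement for the explicit EnterInternalFrame()/LeaveInternalFrame() pair, so the internal frame is torn down on every exit from the block. A generic sketch of that RAII pattern with hypothetical stand-in types (not V8's actual FrameScope):

#include <cstdio>

// Hypothetical stand-ins for the assembler and its frame bookkeeping.
struct Assembler {
  void EnterFrame() { std::printf("enter internal frame\n"); }
  void LeaveFrame() { std::printf("leave internal frame\n"); }
};

// RAII frame guard: the frame is entered in the constructor and left in the
// destructor, so every exit path from the enclosing block leaves the frame.
class ScopedFrame {
 public:
  explicit ScopedFrame(Assembler* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~ScopedFrame() { masm_->LeaveFrame(); }
 private:
  Assembler* masm_;
};

// Usage, mirroring the block in the diff:
//   { ScopedFrame scope(&masm); /* push args, call the builtin */ }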
(...skipping 195 matching lines...)
6860 __ CallStub(&stub); 6920 __ CallStub(&stub);
6861 __ mov(scratch2, a2); 6921 __ mov(scratch2, a2);
6862 __ MultiPop(spill_mask); 6922 __ MultiPop(spill_mask);
6863 6923
6864 __ Branch(done, ne, v0, Operand(zero_reg)); 6924 __ Branch(done, ne, v0, Operand(zero_reg));
6865 __ Branch(miss, eq, v0, Operand(zero_reg)); 6925 __ Branch(miss, eq, v0, Operand(zero_reg));
6866 } 6926 }
6867 6927
6868 6928
6869 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { 6929 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6930 // This stub overrides SometimesSetsUpAFrame() to return false. That means
6931 // we cannot call anything that could cause a GC from this stub.
6870 // Registers: 6932 // Registers:
6871 // result: StringDictionary to probe 6933 // result: StringDictionary to probe
6872 // a1: key 6934 // a1: key
6873 // : StringDictionary to probe. 6935 // : StringDictionary to probe.
6874 // index_: will hold an index of entry if lookup is successful. 6936 // index_: will hold an index of entry if lookup is successful.
6875 // might alias with result_. 6937 // might alias with result_.
6876 // Returns: 6938 // Returns:
6877 // result_ is zero if lookup failed, non zero otherwise. 6939 // result_ is zero if lookup failed, non zero otherwise.
6878 6940
6879 Register result = v0; 6941 Register result = v0;
(...skipping 78 matching lines...)
6958 __ mov(result, zero_reg); 7020 __ mov(result, zero_reg);
6959 __ Ret(); 7021 __ Ret();
6960 } 7022 }
6961 7023
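The probing body of the dictionary lookup is largely elided above; as a rough guide only, here is a generic open-addressing probe loop of the kind such a lookup performs (linear probing for brevity, not V8's actual probe sequence, and with plain C strings standing in for symbol keys):

#include <stdint.h>
#include <string.h>

// Given a power-of-two capacity, probe from hash & mask until the key or an
// empty slot is found. Returns the entry index, or -1 when the lookup fails.
static int FindEntry(const char* const* keys, int capacity,
                     const char* key, uint32_t hash) {
  uint32_t mask = static_cast<uint32_t>(capacity) - 1;
  for (int count = 0; count < capacity; count++) {
    uint32_t entry = (hash + count) & mask;           // linear probing
    if (keys[entry] == NULL) return -1;               // empty slot: miss
    if (strcmp(keys[entry], key) == 0) return entry;  // hit
  }
  return -1;
}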
6962 7024
6963 #undef __ 7025 #undef __
6964 7026
6965 } } // namespace v8::internal 7027 } } // namespace v8::internal
6966 7028
6967 #endif // V8_TARGET_ARCH_MIPS 7029 #endif // V8_TARGET_ARCH_MIPS