OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 330 matching lines...)
341 __ And(at, a1, a1); | 341 __ And(at, a1, a1); |
342 __ Branch(&check_optimized, ne, at, Operand(zero_reg)); | 342 __ Branch(&check_optimized, ne, at, Operand(zero_reg)); |
343 } | 343 } |
344 __ bind(&install_unoptimized); | 344 __ bind(&install_unoptimized); |
345 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | 345 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
346 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); | 346 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); |
347 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset)); | 347 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset)); |
348 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 348 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
349 | 349 |
350 // Return result. The argument function info has been popped already. | 350 // Return result. The argument function info has been popped already. |
| 351 __ Ret(USE_DELAY_SLOT); |
351 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset)); | 352 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset)); |
352 __ Ret(); | |
353 | 353 |
354 __ bind(&check_optimized); | 354 __ bind(&check_optimized); |
355 | 355 |
356 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3); | 356 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3); |
357 | 357 |
358 // a2 holds native context, a1 points to fixed array of 3-element entries | 358 // a2 holds native context, a1 points to fixed array of 3-element entries |
359 // (native context, optimized code, literals). | 359 // (native context, optimized code, literals). |
360 // The optimized code map must never be empty, so check the first elements. | 360 // The optimized code map must never be empty, so check the first elements. |
361 Label install_optimized; | 361 Label install_optimized; |
362 // Speculatively move code object into t0. | 362 // Speculatively move code object into t0. |
(...skipping 599 matching lines...)
962 // but it just ends up combining harmlessly with the last digit of the | 962 // but it just ends up combining harmlessly with the last digit of the |
963 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 963 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
964 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | 964 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. |
965 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 965 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
966 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 966 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
967 __ srl(at, the_int_, shift_distance); | 967 __ srl(at, the_int_, shift_distance); |
968 __ or_(scratch_, scratch_, at); | 968 __ or_(scratch_, scratch_, at); |
969 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 969 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
970 HeapNumber::kExponentOffset)); | 970 HeapNumber::kExponentOffset)); |
971 __ sll(scratch_, the_int_, 32 - shift_distance); | 971 __ sll(scratch_, the_int_, 32 - shift_distance); |
| 972 __ Ret(USE_DELAY_SLOT); |
972 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 973 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
973 HeapNumber::kMantissaOffset)); | 974 HeapNumber::kMantissaOffset)); |
974 __ Ret(); | |
975 | 975 |
976 __ bind(&max_negative_int); | 976 __ bind(&max_negative_int); |
977 // The max negative int32 is stored as a positive number in the mantissa of | 977 // The max negative int32 is stored as a positive number in the mantissa of |
978 // a double because it uses a sign bit instead of using two's complement. | 978 // a double because it uses a sign bit instead of using two's complement. |
979 // The actual mantissa bits stored are all 0 because the implicit most | 979 // The actual mantissa bits stored are all 0 because the implicit most |
980 // significant 1 bit is not stored. | 980 // significant 1 bit is not stored. |
981 non_smi_exponent += 1 << HeapNumber::kExponentShift; | 981 non_smi_exponent += 1 << HeapNumber::kExponentShift; |
982 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent)); | 982 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent)); |
983 __ sw(scratch_, | 983 __ sw(scratch_, |
984 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); | 984 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); |
985 __ mov(scratch_, zero_reg); | 985 __ mov(scratch_, zero_reg); |
| 986 __ Ret(USE_DELAY_SLOT); |
986 __ sw(scratch_, | 987 __ sw(scratch_, |
987 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | 988 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
988 __ Ret(); | |
989 } | 989 } |
990 | 990 |
991 | 991 |
992 // Handle the case where the lhs and rhs are the same object. | 992 // Handle the case where the lhs and rhs are the same object. |
993 // Equality is almost reflexive (everything but NaN), so this is a test | 993 // Equality is almost reflexive (everything but NaN), so this is a test |
994 // for "identity and not NaN". | 994 // for "identity and not NaN". |
995 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 995 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
996 Label* slow, | 996 Label* slow, |
997 Condition cc) { | 997 Condition cc) { |
998 Label not_identical; | 998 Label not_identical; |
(...skipping 17 matching lines...)
1016 // Comparing JS objects with <=, >= is complicated. | 1016 // Comparing JS objects with <=, >= is complicated. |
1017 if (cc != eq) { | 1017 if (cc != eq) { |
1018 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1018 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
1019 // Normally here we fall through to return_equal, but undefined is | 1019 // Normally here we fall through to return_equal, but undefined is |
1020 // special: (undefined == undefined) == true, but | 1020 // special: (undefined == undefined) == true, but |
1021 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 1021 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
1022 if (cc == less_equal || cc == greater_equal) { | 1022 if (cc == less_equal || cc == greater_equal) { |
1023 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); | 1023 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); |
1024 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 1024 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); |
1025 __ Branch(&return_equal, ne, a0, Operand(t2)); | 1025 __ Branch(&return_equal, ne, a0, Operand(t2)); |
| 1026 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
| 1027 __ Ret(USE_DELAY_SLOT); |
1026 if (cc == le) { | 1028 if (cc == le) { |
1027 // undefined <= undefined should fail. | 1029 // undefined <= undefined should fail. |
1028 __ li(v0, Operand(GREATER)); | 1030 __ li(v0, Operand(GREATER)); |
1029 } else { | 1031 } else { |
1030 // undefined >= undefined should fail. | 1032 // undefined >= undefined should fail. |
1031 __ li(v0, Operand(LESS)); | 1033 __ li(v0, Operand(LESS)); |
1032 } | 1034 } |
1033 __ Ret(); | |
1034 } | 1035 } |
1035 } | 1036 } |
1036 } | 1037 } |
1037 | 1038 |
1038 __ bind(&return_equal); | 1039 __ bind(&return_equal); |
1039 | 1040 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
| 1041 __ Ret(USE_DELAY_SLOT); |
1040 if (cc == less) { | 1042 if (cc == less) { |
1041 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | 1043 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
1042 } else if (cc == greater) { | 1044 } else if (cc == greater) { |
1043 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | 1045 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
1044 } else { | 1046 } else { |
1045 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. | 1047 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
1046 } | 1048 } |
1047 __ Ret(); | |
1048 | 1049 |
1049 // For less and greater we don't have to check for NaN since the result of | 1050 // For less and greater we don't have to check for NaN since the result of |
1050 // x < x is false regardless. For the others here is some code to check | 1051 // x < x is false regardless. For the others here is some code to check |
1051 // for NaN. | 1052 // for NaN. |
1052 if (cc != lt && cc != gt) { | 1053 if (cc != lt && cc != gt) { |
1053 __ bind(&heap_number); | 1054 __ bind(&heap_number); |
1054 // It is a heap number, so return non-equal if it's NaN and equal if it's | 1055 // It is a heap number, so return non-equal if it's NaN and equal if it's |
1055 // not NaN. | 1056 // not NaN. |
1056 | 1057 |
1057 // The representation of NaN values has all exponent bits (52..62) set, | 1058 // The representation of NaN values has all exponent bits (52..62) set, |
(...skipping 10 matching lines...)
1068 // Or with all low-bits of mantissa. | 1069 // Or with all low-bits of mantissa. |
1069 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 1070 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
1070 __ Or(v0, t3, Operand(t2)); | 1071 __ Or(v0, t3, Operand(t2)); |
1071 // For equal we already have the right value in v0: Return zero (equal) | 1072 // For equal we already have the right value in v0: Return zero (equal) |
1072 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 1073 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
1073 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 1074 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
1074 // value if it's a NaN. | 1075 // value if it's a NaN. |
1075 if (cc != eq) { | 1076 if (cc != eq) { |
1076 // All-zero means Infinity means equal. | 1077 // All-zero means Infinity means equal. |
1077 __ Ret(eq, v0, Operand(zero_reg)); | 1078 __ Ret(eq, v0, Operand(zero_reg)); |
| 1079 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
| 1080 __ Ret(USE_DELAY_SLOT); |
1078 if (cc == le) { | 1081 if (cc == le) { |
1079 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. | 1082 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. |
1080 } else { | 1083 } else { |
1081 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. | 1084 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. |
1082 } | 1085 } |
1083 } | 1086 } |
1084 __ Ret(); | |
1085 } | 1087 } |
1086 // No fall through here. | 1088 // No fall through here. |
1087 | 1089 |
1088 __ bind(&not_identical); | 1090 __ bind(&not_identical); |
1089 } | 1091 } |
1090 | 1092 |
1091 | 1093 |
1092 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 1094 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
1093 Register lhs, | 1095 Register lhs, |
1094 Register rhs, | 1096 Register rhs, |
(...skipping 354 matching lines...)
1449 // Check if EQUAL condition is satisfied. If true, move conditionally | 1451 // Check if EQUAL condition is satisfied. If true, move conditionally |
1450 // result to v0. | 1452 // result to v0. |
1451 __ c(EQ, D, f12, f14); | 1453 __ c(EQ, D, f12, f14); |
1452 __ Movt(v0, t2); | 1454 __ Movt(v0, t2); |
1453 | 1455 |
1454 __ Ret(); | 1456 __ Ret(); |
1455 | 1457 |
1456 __ bind(&nan); | 1458 __ bind(&nan); |
1457 // NaN comparisons always fail. | 1459 // NaN comparisons always fail. |
1458 // Load whatever we need in v0 to make the comparison fail. | 1460 // Load whatever we need in v0 to make the comparison fail. |
| 1461 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
| 1462 __ Ret(USE_DELAY_SLOT); |
1459 if (cc == lt || cc == le) { | 1463 if (cc == lt || cc == le) { |
1460 __ li(v0, Operand(GREATER)); | 1464 __ li(v0, Operand(GREATER)); |
1461 } else { | 1465 } else { |
1462 __ li(v0, Operand(LESS)); | 1466 __ li(v0, Operand(LESS)); |
1463 } | 1467 } |
1464 __ Ret(); | 1468 |
1465 | 1469 |
1466 __ bind(&not_smis); | 1470 __ bind(&not_smis); |
1467 // At this point we know we are dealing with two different objects, | 1471 // At this point we know we are dealing with two different objects, |
1468 // and neither of them is a Smi. The objects are in lhs_ and rhs_. | 1472 // and neither of them is a Smi. The objects are in lhs_ and rhs_. |
1469 if (strict()) { | 1473 if (strict()) { |
1470 // This returns non-equal for some object types, or falls through if it | 1474 // This returns non-equal for some object types, or falls through if it |
1471 // was not lucky. | 1475 // was not lucky. |
1472 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 1476 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
1473 } | 1477 } |
1474 | 1478 |
(...skipping 243 matching lines...)
1718 } | 1722 } |
1719 | 1723 |
1720 | 1724 |
1721 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, | 1725 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
1722 Label* slow) { | 1726 Label* slow) { |
1723 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); | 1727 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); |
1724 // a0 is a heap number. Get a new heap number in a1. | 1728 // a0 is a heap number. Get a new heap number in a1. |
1725 if (mode_ == UNARY_OVERWRITE) { | 1729 if (mode_ == UNARY_OVERWRITE) { |
1726 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 1730 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
1727 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. | 1731 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. |
| 1732 __ Ret(USE_DELAY_SLOT); |
1728 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 1733 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
1729 } else { | 1734 } else { |
1730 Label slow_allocate_heapnumber, heapnumber_allocated; | 1735 Label slow_allocate_heapnumber, heapnumber_allocated; |
1731 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); | 1736 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); |
1732 __ jmp(&heapnumber_allocated); | 1737 __ jmp(&heapnumber_allocated); |
1733 | 1738 |
1734 __ bind(&slow_allocate_heapnumber); | 1739 __ bind(&slow_allocate_heapnumber); |
1735 { | 1740 { |
1736 FrameScope scope(masm, StackFrame::INTERNAL); | 1741 FrameScope scope(masm, StackFrame::INTERNAL); |
1737 __ push(a0); | 1742 __ push(a0); |
1738 __ CallRuntime(Runtime::kNumberAlloc, 0); | 1743 __ CallRuntime(Runtime::kNumberAlloc, 0); |
1739 __ mov(a1, v0); | 1744 __ mov(a1, v0); |
1740 __ pop(a0); | 1745 __ pop(a0); |
1741 } | 1746 } |
1742 | 1747 |
1743 __ bind(&heapnumber_allocated); | 1748 __ bind(&heapnumber_allocated); |
1744 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 1749 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
1745 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 1750 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
1746 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); | 1751 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); |
1747 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. | 1752 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. |
1748 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); | 1753 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); |
| 1754 __ Ret(USE_DELAY_SLOT); |
1749 __ mov(v0, a1); | 1755 __ mov(v0, a1); |
1750 } | 1756 } |
1751 __ Ret(); | |
1752 } | 1757 } |
1753 | 1758 |
1754 | 1759 |
1755 void UnaryOpStub::GenerateHeapNumberCodeBitNot( | 1760 void UnaryOpStub::GenerateHeapNumberCodeBitNot( |
1756 MacroAssembler* masm, | 1761 MacroAssembler* masm, |
1757 Label* slow) { | 1762 Label* slow) { |
1758 Label impossible; | 1763 Label impossible; |
1759 | 1764 |
1760 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); | 1765 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); |
1761 // Convert the heap number in a0 to an untagged integer in a1. | 1766 // Convert the heap number in a0 to an untagged integer in a1. |
1762 __ ConvertToInt32(a0, a1, a2, a3, f0, slow); | 1767 __ ConvertToInt32(a0, a1, a2, a3, f0, slow); |
1763 | 1768 |
1764 // Do the bitwise operation and check if the result fits in a smi. | 1769 // Do the bitwise operation and check if the result fits in a smi. |
1765 Label try_float; | 1770 Label try_float; |
1766 __ Neg(a1, a1); | 1771 __ Neg(a1, a1); |
1767 __ Addu(a2, a1, Operand(0x40000000)); | 1772 __ Addu(a2, a1, Operand(0x40000000)); |
1768 __ Branch(&try_float, lt, a2, Operand(zero_reg)); | 1773 __ Branch(&try_float, lt, a2, Operand(zero_reg)); |
1769 | 1774 |
1770 // Tag the result as a smi and we're done. | 1775 // Tag the result as a smi and we're done. |
| 1776 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
1771 __ SmiTag(v0, a1); | 1777 __ SmiTag(v0, a1); |
1772 __ Ret(); | |
1773 | 1778 |
1774 // Try to store the result in a heap number. | 1779 // Try to store the result in a heap number. |
1775 __ bind(&try_float); | 1780 __ bind(&try_float); |
1776 if (mode_ == UNARY_NO_OVERWRITE) { | 1781 if (mode_ == UNARY_NO_OVERWRITE) { |
1777 Label slow_allocate_heapnumber, heapnumber_allocated; | 1782 Label slow_allocate_heapnumber, heapnumber_allocated; |
1778 // Allocate a new heap number without zapping v0, which we need if it fails. | 1783 // Allocate a new heap number without zapping v0, which we need if it fails. |
1779 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber); | 1784 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber); |
1780 __ jmp(&heapnumber_allocated); | 1785 __ jmp(&heapnumber_allocated); |
1781 | 1786 |
1782 __ bind(&slow_allocate_heapnumber); | 1787 __ bind(&slow_allocate_heapnumber); |
(...skipping 178 matching lines...)
1961 // Check for no remainder first. | 1966 // Check for no remainder first. |
1962 __ mfhi(scratch1); | 1967 __ mfhi(scratch1); |
1963 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); | 1968 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); |
1964 __ mflo(scratch1); | 1969 __ mflo(scratch1); |
1965 __ Branch(&done, ne, scratch1, Operand(zero_reg)); | 1970 __ Branch(&done, ne, scratch1, Operand(zero_reg)); |
1966 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); | 1971 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
1967 __ bind(&done); | 1972 __ bind(&done); |
1968 // Check that the signed result fits in a Smi. | 1973 // Check that the signed result fits in a Smi. |
1969 __ Addu(scratch2, scratch1, Operand(0x40000000)); | 1974 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
1970 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); | 1975 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
| 1976 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
1971 __ SmiTag(v0, scratch1); | 1977 __ SmiTag(v0, scratch1); |
1972 __ Ret(); | |
1973 } | 1978 } |
1974 break; | 1979 break; |
1975 case Token::MOD: { | 1980 case Token::MOD: { |
1976 Label done; | 1981 Label done; |
1977 __ SmiUntag(scratch2, right); | 1982 __ SmiUntag(scratch2, right); |
1978 __ SmiUntag(scratch1, left); | 1983 __ SmiUntag(scratch1, left); |
1979 __ Div(scratch1, scratch2); | 1984 __ Div(scratch1, scratch2); |
1980 // A minor optimization: div may be calculated asynchronously, so we check | 1985 // A minor optimization: div may be calculated asynchronously, so we check |
1981 // for division by 0 before calling mfhi. | 1986 // for division by 0 before calling mfhi. |
1982 // Check for zero on the right hand side. | 1987 // Check for zero on the right hand side. |
1983 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); | 1988 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); |
1984 // If the result is 0, we need to make sure the dividend (left) is | 1989 // If the result is 0, we need to make sure the dividend (left) is |
1985 // positive (or 0), otherwise it is a -0 case. | 1990 // positive (or 0), otherwise it is a -0 case. |
1986 // Remainder is in 'hi'. | 1991 // Remainder is in 'hi'. |
1987 __ mfhi(scratch2); | 1992 __ mfhi(scratch2); |
1988 __ Branch(&done, ne, scratch2, Operand(zero_reg)); | 1993 __ Branch(&done, ne, scratch2, Operand(zero_reg)); |
1989 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); | 1994 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); |
1990 __ bind(&done); | 1995 __ bind(&done); |
1991 // Check that the signed result fits in a Smi. | 1996 // Check that the signed result fits in a Smi. |
1992 __ Addu(scratch1, scratch2, Operand(0x40000000)); | 1997 __ Addu(scratch1, scratch2, Operand(0x40000000)); |
1993 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); | 1998 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); |
| 1999 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
1994 __ SmiTag(v0, scratch2); | 2000 __ SmiTag(v0, scratch2); |
1995 __ Ret(); | |
1996 } | 2001 } |
1997 break; | 2002 break; |
1998 case Token::BIT_OR: | 2003 case Token::BIT_OR: |
1999 __ Ret(USE_DELAY_SLOT); | 2004 __ Ret(USE_DELAY_SLOT); |
2000 __ or_(v0, left, right); | 2005 __ or_(v0, left, right); |
2001 break; | 2006 break; |
2002 case Token::BIT_AND: | 2007 case Token::BIT_AND: |
2003 __ Ret(USE_DELAY_SLOT); | 2008 __ Ret(USE_DELAY_SLOT); |
2004 __ and_(v0, left, right); | 2009 __ and_(v0, left, right); |
2005 break; | 2010 break; |
(...skipping 13 matching lines...)
2019 // Remove tags from operands. We can't do this on a 31 bit number | 2024 // Remove tags from operands. We can't do this on a 31 bit number |
2020 // because then the 0s get shifted into bit 30 instead of bit 31. | 2025 // because then the 0s get shifted into bit 30 instead of bit 31. |
2021 __ SmiUntag(scratch1, left); | 2026 __ SmiUntag(scratch1, left); |
2022 __ GetLeastBitsFromSmi(scratch2, right, 5); | 2027 __ GetLeastBitsFromSmi(scratch2, right, 5); |
2023 __ srlv(v0, scratch1, scratch2); | 2028 __ srlv(v0, scratch1, scratch2); |
2024 // Unsigned shift is not allowed to produce a negative number, so | 2029 // Unsigned shift is not allowed to produce a negative number, so |
2025 // check the sign bit and the sign bit after Smi tagging. | 2030 // check the sign bit and the sign bit after Smi tagging. |
2026 __ And(scratch1, v0, Operand(0xc0000000)); | 2031 __ And(scratch1, v0, Operand(0xc0000000)); |
2027 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); | 2032 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); |
2028 // Smi tag result. | 2033 // Smi tag result. |
| 2034 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
2029 __ SmiTag(v0); | 2035 __ SmiTag(v0); |
2030 __ Ret(); | |
2031 break; | 2036 break; |
2032 case Token::SHL: | 2037 case Token::SHL: |
2033 // Remove tags from operands. | 2038 // Remove tags from operands. |
2034 __ SmiUntag(scratch1, left); | 2039 __ SmiUntag(scratch1, left); |
2035 __ GetLeastBitsFromSmi(scratch2, right, 5); | 2040 __ GetLeastBitsFromSmi(scratch2, right, 5); |
2036 __ sllv(scratch1, scratch1, scratch2); | 2041 __ sllv(scratch1, scratch1, scratch2); |
2037 // Check that the signed result fits in a Smi. | 2042 // Check that the signed result fits in a Smi. |
2038 __ Addu(scratch2, scratch1, Operand(0x40000000)); | 2043 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
2039 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); | 2044 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
2040 __ SmiTag(v0, scratch1); | 2045 __ Ret(USE_DELAY_SLOT); |
2041 __ Ret(); | 2046 __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot. |
2042 break; | 2047 break; |
2043 default: | 2048 default: |
2044 UNREACHABLE(); | 2049 UNREACHABLE(); |
2045 } | 2050 } |
2046 __ bind(&not_smi_result); | 2051 __ bind(&not_smi_result); |
2047 } | 2052 } |
2048 | 2053 |
2049 | 2054 |
2050 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 2055 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
2051 Register result, | 2056 Register result, |
(...skipping 181 matching lines...)
2233 // Use only the 5 least significant bits of the shift count. | 2238 // Use only the 5 least significant bits of the shift count. |
2234 __ GetLeastBitsFromInt32(a2, a2, 5); | 2239 __ GetLeastBitsFromInt32(a2, a2, 5); |
2235 __ sllv(a2, a3, a2); | 2240 __ sllv(a2, a3, a2); |
2236 break; | 2241 break; |
2237 default: | 2242 default: |
2238 UNREACHABLE(); | 2243 UNREACHABLE(); |
2239 } | 2244 } |
2240 // Check that the *signed* result fits in a smi. | 2245 // Check that the *signed* result fits in a smi. |
2241 __ Addu(a3, a2, Operand(0x40000000)); | 2246 __ Addu(a3, a2, Operand(0x40000000)); |
2242 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg)); | 2247 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg)); |
| 2248 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
2243 __ SmiTag(v0, a2); | 2249 __ SmiTag(v0, a2); |
2244 __ Ret(); | |
2245 | 2250 |
2246 // Allocate new heap number for result. | 2251 // Allocate new heap number for result. |
2247 __ bind(&result_not_a_smi); | 2252 __ bind(&result_not_a_smi); |
2248 Register result = t1; | 2253 Register result = t1; |
2249 if (smi_operands) { | 2254 if (smi_operands) { |
2250 __ AllocateHeapNumber( | 2255 __ AllocateHeapNumber( |
2251 result, scratch1, scratch2, heap_number_map, gc_required); | 2256 result, scratch1, scratch2, heap_number_map, gc_required); |
2252 } else { | 2257 } else { |
2253 BinaryOpStub_GenerateHeapResultAllocation( | 2258 BinaryOpStub_GenerateHeapResultAllocation( |
2254 masm, result, heap_number_map, scratch1, scratch2, gc_required, | 2259 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
(...skipping 258 matching lines...)
2513 __ mfc1(scratch2, f11); | 2518 __ mfc1(scratch2, f11); |
2514 __ And(scratch2, scratch2, HeapNumber::kSignMask); | 2519 __ And(scratch2, scratch2, HeapNumber::kSignMask); |
2515 __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition | 2520 __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition |
2516 : &return_heap_number, | 2521 : &return_heap_number, |
2517 ne, | 2522 ne, |
2518 scratch2, | 2523 scratch2, |
2519 Operand(zero_reg)); | 2524 Operand(zero_reg)); |
2520 __ bind(&not_zero); | 2525 __ bind(&not_zero); |
2521 | 2526 |
2522 // Tag the result and return. | 2527 // Tag the result and return. |
2523 __ SmiTag(v0, scratch1); | 2528 __ Ret(USE_DELAY_SLOT); |
2524 __ Ret(); | 2529 __ SmiTag(v0, scratch1); // SmiTag emits one instruction. |
2525 } else { | 2530 } else { |
2526 // DIV just falls through to allocating a heap number. | 2531 // DIV just falls through to allocating a heap number. |
2527 } | 2532 } |
2528 | 2533 |
2529 __ bind(&return_heap_number); | 2534 __ bind(&return_heap_number); |
2530 // Return a heap number, or fall through to type transition or runtime | 2535 // Return a heap number, or fall through to type transition or runtime |
2531 // call if we can't. | 2536 // call if we can't. |
2532 // We are using FPU registers so s0 is available. | 2537 // We are using FPU registers so s0 is available. |
2533 heap_number_result = s0; | 2538 heap_number_result = s0; |
2534 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2539 BinaryOpStub_GenerateHeapResultAllocation(masm, |
2535 heap_number_result, | 2540 heap_number_result, |
2536 heap_number_map, | 2541 heap_number_map, |
2537 scratch1, | 2542 scratch1, |
2538 scratch2, | 2543 scratch2, |
2539 &call_runtime, | 2544 &call_runtime, |
2540 mode_); | 2545 mode_); |
| 2546 __ sdc1(f10, |
| 2547 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| 2548 __ Ret(USE_DELAY_SLOT); |
2541 __ mov(v0, heap_number_result); | 2549 __ mov(v0, heap_number_result); |
2542 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
2543 __ Ret(); | |
2544 | 2550 |
2545 // A DIV operation expecting an integer result falls through | 2551 // A DIV operation expecting an integer result falls through |
2546 // to type transition. | 2552 // to type transition. |
2547 | 2553 |
2548 } else { | 2554 } else { |
2549 if (has_fixed_right_arg_) { | 2555 if (has_fixed_right_arg_) { |
2550 __ Move(f16, fixed_right_arg_value()); | 2556 __ Move(f16, fixed_right_arg_value()); |
2551 __ BranchF(&transition, NULL, ne, f14, f16); | 2557 __ BranchF(&transition, NULL, ne, f14, f16); |
2552 } | 2558 } |
2553 | 2559 |
(...skipping 99 matching lines...)
2653 break; | 2659 break; |
2654 default: | 2660 default: |
2655 UNREACHABLE(); | 2661 UNREACHABLE(); |
2656 } | 2662 } |
2657 | 2663 |
2658 // Check if the result fits in a smi. | 2664 // Check if the result fits in a smi. |
2659 __ Addu(scratch1, a2, Operand(0x40000000)); | 2665 __ Addu(scratch1, a2, Operand(0x40000000)); |
2660 // If not try to return a heap number. (We know the result is an int32.) | 2666 // If not try to return a heap number. (We know the result is an int32.) |
2661 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); | 2667 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); |
2662 // Tag the result and return. | 2668 // Tag the result and return. |
| 2669 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. |
2663 __ SmiTag(v0, a2); | 2670 __ SmiTag(v0, a2); |
2664 __ Ret(); | |
2665 | 2671 |
2666 __ bind(&return_heap_number); | 2672 __ bind(&return_heap_number); |
2667 heap_number_result = t1; | 2673 heap_number_result = t1; |
2668 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2674 BinaryOpStub_GenerateHeapResultAllocation(masm, |
2669 heap_number_result, | 2675 heap_number_result, |
2670 heap_number_map, | 2676 heap_number_map, |
2671 scratch1, | 2677 scratch1, |
2672 scratch2, | 2678 scratch2, |
2673 &call_runtime, | 2679 &call_runtime, |
2674 mode_); | 2680 mode_); |
2675 | 2681 |
2676 if (op_ != Token::SHR) { | 2682 if (op_ != Token::SHR) { |
2677 // Convert the result to a floating point value. | 2683 // Convert the result to a floating point value. |
2678 __ mtc1(a2, double_scratch); | 2684 __ mtc1(a2, double_scratch); |
2679 __ cvt_d_w(double_scratch, double_scratch); | 2685 __ cvt_d_w(double_scratch, double_scratch); |
2680 } else { | 2686 } else { |
2681 // The result must be interpreted as an unsigned 32-bit integer. | 2687 // The result must be interpreted as an unsigned 32-bit integer. |
2682 __ mtc1(a2, double_scratch); | 2688 __ mtc1(a2, double_scratch); |
2683 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); | 2689 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); |
2684 } | 2690 } |
2685 | 2691 |
2686 // Store the result. | 2692 // Store the result. |
| 2693 __ sdc1(double_scratch, |
| 2694 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| 2695 __ Ret(USE_DELAY_SLOT); |
2687 __ mov(v0, heap_number_result); | 2696 __ mov(v0, heap_number_result); |
2688 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
2689 __ Ret(); | |
2690 | 2697 |
2691 break; | 2698 break; |
2692 } | 2699 } |
2693 | 2700 |
2694 default: | 2701 default: |
2695 UNREACHABLE(); | 2702 UNREACHABLE(); |
2696 } | 2703 } |
2697 | 2704 |
2698 // We never expect DIV to yield an integer result, so we always generate | 2705 // We never expect DIV to yield an integer result, so we always generate |
2699 // type transition code for DIV operations expecting an integer result: the | 2706 // type transition code for DIV operations expecting an integer result: the |
(...skipping 1417 matching lines...)
4117 | 4124 |
4118 // Check index (a1) against formal parameters count limit passed in | 4125 // Check index (a1) against formal parameters count limit passed in |
4119 // through register a0. Use unsigned comparison to get negative | 4126 // through register a0. Use unsigned comparison to get negative |
4120 // check for free. | 4127 // check for free. |
4121 __ Branch(&slow, hs, a1, Operand(a0)); | 4128 __ Branch(&slow, hs, a1, Operand(a0)); |
4122 | 4129 |
4123 // Read the argument from the stack and return it. | 4130 // Read the argument from the stack and return it. |
4124 __ subu(a3, a0, a1); | 4131 __ subu(a3, a0, a1); |
4125 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 4132 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); |
4126 __ Addu(a3, fp, Operand(t3)); | 4133 __ Addu(a3, fp, Operand(t3)); |
| 4134 __ Ret(USE_DELAY_SLOT); |
4127 __ lw(v0, MemOperand(a3, kDisplacement)); | 4135 __ lw(v0, MemOperand(a3, kDisplacement)); |
4128 __ Ret(); | |
4129 | 4136 |
4130 // Arguments adaptor case: Check index (a1) against actual arguments | 4137 // Arguments adaptor case: Check index (a1) against actual arguments |
4131 // limit found in the arguments adaptor frame. Use unsigned | 4138 // limit found in the arguments adaptor frame. Use unsigned |
4132 // comparison to get negative check for free. | 4139 // comparison to get negative check for free. |
4133 __ bind(&adaptor); | 4140 __ bind(&adaptor); |
4134 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4141 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
4135 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); | 4142 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); |
4136 | 4143 |
4137 // Read the argument from the adaptor frame and return it. | 4144 // Read the argument from the adaptor frame and return it. |
4138 __ subu(a3, a0, a1); | 4145 __ subu(a3, a0, a1); |
4139 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 4146 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); |
4140 __ Addu(a3, a2, Operand(t3)); | 4147 __ Addu(a3, a2, Operand(t3)); |
| 4148 __ Ret(USE_DELAY_SLOT); |
4141 __ lw(v0, MemOperand(a3, kDisplacement)); | 4149 __ lw(v0, MemOperand(a3, kDisplacement)); |
4142 __ Ret(); | |
4143 | 4150 |
4144 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 4151 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
4145 // by calling the runtime system. | 4152 // by calling the runtime system. |
4146 __ bind(&slow); | 4153 __ bind(&slow); |
4147 __ push(a1); | 4154 __ push(a1); |
4148 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 4155 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
4149 } | 4156 } |
4150 | 4157 |
4151 | 4158 |
4152 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { | 4159 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
(...skipping 1842 matching lines...)
5995 Register scratch2, | 6002 Register scratch2, |
5996 Register scratch3) { | 6003 Register scratch3) { |
5997 Register length = scratch1; | 6004 Register length = scratch1; |
5998 | 6005 |
5999 // Compare lengths. | 6006 // Compare lengths. |
6000 Label strings_not_equal, check_zero_length; | 6007 Label strings_not_equal, check_zero_length; |
6001 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); | 6008 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); |
6002 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 6009 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
6003 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); | 6010 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); |
6004 __ bind(&strings_not_equal); | 6011 __ bind(&strings_not_equal); |
| 6012 ASSERT(is_int16(NOT_EQUAL)); |
| 6013 __ Ret(USE_DELAY_SLOT); |
6005 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); | 6014 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
6006 __ Ret(); | |
6007 | 6015 |
6008 // Check if the length is zero. | 6016 // Check if the length is zero. |
6009 Label compare_chars; | 6017 Label compare_chars; |
6010 __ bind(&check_zero_length); | 6018 __ bind(&check_zero_length); |
6011 STATIC_ASSERT(kSmiTag == 0); | 6019 STATIC_ASSERT(kSmiTag == 0); |
6012 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); | 6020 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); |
| 6021 ASSERT(is_int16(EQUAL)); |
| 6022 __ Ret(USE_DELAY_SLOT); |
6013 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 6023 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
6014 __ Ret(); | |
6015 | 6024 |
6016 // Compare characters. | 6025 // Compare characters. |
6017 __ bind(&compare_chars); | 6026 __ bind(&compare_chars); |
6018 | 6027 |
6019 GenerateAsciiCharsCompareLoop(masm, | 6028 GenerateAsciiCharsCompareLoop(masm, |
6020 left, right, length, scratch2, scratch3, v0, | 6029 left, right, length, scratch2, scratch3, v0, |
6021 &strings_not_equal); | 6030 &strings_not_equal); |
6022 | 6031 |
6023 // Characters are equal. | 6032 // Characters are equal. |
| 6033 __ Ret(USE_DELAY_SLOT); |
6024 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 6034 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
6025 __ Ret(); | |
6026 } | 6035 } |
6027 | 6036 |
6028 | 6037 |
6029 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 6038 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
6030 Register left, | 6039 Register left, |
6031 Register right, | 6040 Register right, |
6032 Register scratch1, | 6041 Register scratch1, |
6033 Register scratch2, | 6042 Register scratch2, |
6034 Register scratch3, | 6043 Register scratch3, |
6035 Register scratch4) { | 6044 Register scratch4) { |
(...skipping 497 matching lines...)
6533 | 6542 |
6534 | 6543 |
6535 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 6544 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
6536 ASSERT(state_ == CompareIC::SMI); | 6545 ASSERT(state_ == CompareIC::SMI); |
6537 Label miss; | 6546 Label miss; |
6538 __ Or(a2, a1, a0); | 6547 __ Or(a2, a1, a0); |
6539 __ JumpIfNotSmi(a2, &miss); | 6548 __ JumpIfNotSmi(a2, &miss); |
6540 | 6549 |
6541 if (GetCondition() == eq) { | 6550 if (GetCondition() == eq) { |
6542 // For equality we do not care about the sign of the result. | 6551 // For equality we do not care about the sign of the result. |
| 6552 __ Ret(USE_DELAY_SLOT); |
6543 __ Subu(v0, a0, a1); | 6553 __ Subu(v0, a0, a1); |
6544 } else { | 6554 } else { |
6545 // Untag before subtracting to avoid handling overflow. | 6555 // Untag before subtracting to avoid handling overflow. |
6546 __ SmiUntag(a1); | 6556 __ SmiUntag(a1); |
6547 __ SmiUntag(a0); | 6557 __ SmiUntag(a0); |
| 6558 __ Ret(USE_DELAY_SLOT); |
6548 __ Subu(v0, a1, a0); | 6559 __ Subu(v0, a1, a0); |
6549 } | 6560 } |
6550 __ Ret(); | |
6551 | 6561 |
6552 __ bind(&miss); | 6562 __ bind(&miss); |
6553 GenerateMiss(masm); | 6563 GenerateMiss(masm); |
6554 } | 6564 } |
6555 | 6565 |
6556 | 6566 |
6557 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 6567 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
6558 ASSERT(state_ == CompareIC::NUMBER); | 6568 ASSERT(state_ == CompareIC::NUMBER); |
6559 | 6569 |
6560 Label generic_stub; | 6570 Label generic_stub; |
(...skipping 40 matching lines...)
6601 | 6611 |
6602 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 6612 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
6603 Label fpu_eq, fpu_lt; | 6613 Label fpu_eq, fpu_lt; |
6604 // Test if equal, and also handle the unordered/NaN case. | 6614 // Test if equal, and also handle the unordered/NaN case. |
6605 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); | 6615 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
6606 | 6616 |
6607 // Test if less (unordered case is already handled). | 6617 // Test if less (unordered case is already handled). |
6608 __ BranchF(&fpu_lt, NULL, lt, f0, f2); | 6618 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
6609 | 6619 |
6610 // Otherwise it's greater, so just fall thru, and return. | 6620 // Otherwise it's greater, so just fall thru, and return. |
| 6621 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); |
| 6622 __ Ret(USE_DELAY_SLOT); |
6611 __ li(v0, Operand(GREATER)); | 6623 __ li(v0, Operand(GREATER)); |
6612 __ Ret(); | |
6613 | 6624 |
6614 __ bind(&fpu_eq); | 6625 __ bind(&fpu_eq); |
| 6626 __ Ret(USE_DELAY_SLOT); |
6615 __ li(v0, Operand(EQUAL)); | 6627 __ li(v0, Operand(EQUAL)); |
6616 __ Ret(); | |
6617 | 6628 |
6618 __ bind(&fpu_lt); | 6629 __ bind(&fpu_lt); |
| 6630 __ Ret(USE_DELAY_SLOT); |
6619 __ li(v0, Operand(LESS)); | 6631 __ li(v0, Operand(LESS)); |
6620 __ Ret(); | |
6621 | 6632 |
6622 __ bind(&unordered); | 6633 __ bind(&unordered); |
6623 __ bind(&generic_stub); | 6634 __ bind(&generic_stub); |
6624 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | 6635 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
6625 CompareIC::GENERIC); | 6636 CompareIC::GENERIC); |
6626 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6637 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
6627 | 6638 |
6628 __ bind(&maybe_undefined1); | 6639 __ bind(&maybe_undefined1); |
6629 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6640 if (Token::IsOrderedRelationalCompareOp(op_)) { |
6630 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 6641 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
(...skipping 38 matching lines...)
6669 __ And(tmp1, tmp1, kIsInternalizedMask); | 6680 __ And(tmp1, tmp1, kIsInternalizedMask); |
6670 __ Branch(&miss, eq, tmp1, Operand(zero_reg)); | 6681 __ Branch(&miss, eq, tmp1, Operand(zero_reg)); |
6671 // Make sure a0 is non-zero. At this point input operands are | 6682 // Make sure a0 is non-zero. At this point input operands are |
6672 // guaranteed to be non-zero. | 6683 // guaranteed to be non-zero. |
6673 ASSERT(right.is(a0)); | 6684 ASSERT(right.is(a0)); |
6674 STATIC_ASSERT(EQUAL == 0); | 6685 STATIC_ASSERT(EQUAL == 0); |
6675 STATIC_ASSERT(kSmiTag == 0); | 6686 STATIC_ASSERT(kSmiTag == 0); |
6676 __ mov(v0, right); | 6687 __ mov(v0, right); |
6677 // Internalized strings are compared by identity. | 6688 // Internalized strings are compared by identity. |
6678 __ Ret(ne, left, Operand(right)); | 6689 __ Ret(ne, left, Operand(right)); |
| 6690 ASSERT(is_int16(EQUAL)); |
| 6691 __ Ret(USE_DELAY_SLOT); |
6679 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 6692 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
6680 __ Ret(); | |
6681 | 6693 |
6682 __ bind(&miss); | 6694 __ bind(&miss); |
6683 GenerateMiss(masm); | 6695 GenerateMiss(masm); |
6684 } | 6696 } |
6685 | 6697 |
6686 | 6698 |
6687 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 6699 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
6688 ASSERT(state_ == CompareIC::UNIQUE_NAME); | 6700 ASSERT(state_ == CompareIC::UNIQUE_NAME); |
6689 ASSERT(GetCondition() == eq); | 6701 ASSERT(GetCondition() == eq); |
6690 Label miss; | 6702 Label miss; |
(...skipping 863 matching lines...)
7554 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | 7566 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
7555 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 7567 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
7556 int parameter_count_offset = | 7568 int parameter_count_offset = |
7557 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 7569 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
7558 __ lw(a1, MemOperand(fp, parameter_count_offset)); | 7570 __ lw(a1, MemOperand(fp, parameter_count_offset)); |
7559 if (function_mode_ == JS_FUNCTION_STUB_MODE) { | 7571 if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
7560 __ Addu(a1, a1, Operand(1)); | 7572 __ Addu(a1, a1, Operand(1)); |
7561 } | 7573 } |
7562 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 7574 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
7563 __ sll(a1, a1, kPointerSizeLog2); | 7575 __ sll(a1, a1, kPointerSizeLog2); |
| 7576 __ Ret(USE_DELAY_SLOT); |
7564 __ Addu(sp, sp, a1); | 7577 __ Addu(sp, sp, a1); |
7565 __ Ret(); | |
7566 } | 7578 } |
7567 | 7579 |
7568 | 7580 |
7569 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 7581 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
7570 if (entry_hook_ != NULL) { | 7582 if (entry_hook_ != NULL) { |
7571 ProfileEntryHookStub stub; | 7583 ProfileEntryHookStub stub; |
7572 __ push(ra); | 7584 __ push(ra); |
7573 __ CallStub(&stub); | 7585 __ CallStub(&stub); |
7574 __ pop(ra); | 7586 __ pop(ra); |
7575 } | 7587 } |
(...skipping 341 matching lines...)
7917 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); | 7929 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
7918 } | 7930 } |
7919 } | 7931 } |
7920 | 7932 |
7921 | 7933 |
7922 #undef __ | 7934 #undef __ |
7923 | 7935 |
7924 } } // namespace v8::internal | 7936 } } // namespace v8::internal |
7925 | 7937 |
7926 #endif // V8_TARGET_ARCH_MIPS | 7938 #endif // V8_TARGET_ARCH_MIPS |
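Background on the pattern this patch applies throughout: on MIPS, the instruction slot immediately after a jump (the branch delay slot) is executed unconditionally, so a plain __ Ret() has to pad that slot with a nop. Emitting __ Ret(USE_DELAY_SLOT) first and then exactly one single-instruction operation (sw, li, mov, SmiTag, ...) lets that operation fill the slot instead, saving one instruction per return path; multi-instruction work such as the sdc1 stores is hoisted above the Ret rather than placed in the slot. The sketch below is a minimal toy model of this idea only: ToyAssembler, its Emit/Ret/Print methods, and the printed mnemonics are illustrative inventions for this note, not the real V8 MacroAssembler API.

// toy_delay_slot.cc -- toy model of why Ret(USE_DELAY_SLOT) precedes the
// instruction that fills the branch delay slot. Build: g++ toy_delay_slot.cc
#include <iostream>
#include <string>
#include <vector>

enum BranchDelaySlot { PROTECT, USE_DELAY_SLOT };

class ToyAssembler {
 public:
  void Emit(const std::string& instr) { code_.push_back(instr); }
  // Return-from-stub: emits `jr ra`. With PROTECT it also pads the delay
  // slot with a nop; with USE_DELAY_SLOT the caller promises that the very
  // next Emit() is a single instruction that will occupy the slot.
  void Ret(BranchDelaySlot bd = PROTECT) {
    Emit("jr ra");
    if (bd == PROTECT) Emit("nop              # wasted delay slot");
  }
  void Print() const {
    for (const std::string& instr : code_) std::cout << "  " << instr << "\n";
  }
 private:
  std::vector<std::string> code_;
};

int main() {
  ToyAssembler before, after;

  // Old shape: tag the result, then Ret() -> jr ra + nop (three instructions).
  before.Emit("sll v0, a2, 1    # SmiTag(v0, a2)");
  before.Ret();

  // New shape: Ret(USE_DELAY_SLOT) first; the single SmiTag instruction that
  // follows occupies the delay slot (two instructions total).
  after.Ret(USE_DELAY_SLOT);
  after.Emit("sll v0, a2, 1    # SmiTag(v0, a2), executed in the delay slot");

  std::cout << "Before:\n";
  before.Print();
  std::cout << "After:\n";
  after.Print();
  return 0;
}

This also explains the in-diff comments like "SmiTag emits one instruction in delay slot": the transformation is only safe when exactly one machine instruction follows the Ret, which is why the patch moves single stores/moves after Ret(USE_DELAY_SLOT) and leaves longer sequences before it.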