| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 24 matching lines...) |
| 35 #include "regexp-macro-assembler.h" | 35 #include "regexp-macro-assembler.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 | 40 |
| 41 #define __ ACCESS_MASM(masm) | 41 #define __ ACCESS_MASM(masm) |
| 42 | 42 |
| 43 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 43 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 44 Label* slow, | 44 Label* slow, |
| 45 Condition cc, | 45 Condition cc); |
| 46 bool never_nan_nan); | |
| 47 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 48 Register lhs, | 47 Register lhs, |
| 49 Register rhs, | 48 Register rhs, |
| 50 Label* rhs_not_nan, | 49 Label* rhs_not_nan, |
| 51 Label* slow, | 50 Label* slow, |
| 52 bool strict); | 51 bool strict); |
| 53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); |
| 54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 55 Register lhs, | 54 Register lhs, |
| 56 Register rhs); | 55 Register rhs); |
| (...skipping 563 matching lines...) |
| 620 __ Call(stub1.GetCode()); | 619 __ Call(stub1.GetCode()); |
| 621 // Write Smi from a1 to a1 and a0 in double format. | 620 // Write Smi from a1 to a1 and a0 in double format. |
| 622 __ mov(scratch1, a1); | 621 __ mov(scratch1, a1); |
| 623 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); | 622 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); |
| 624 __ Call(stub2.GetCode()); | 623 __ Call(stub2.GetCode()); |
| 625 __ pop(ra); | 624 __ pop(ra); |
| 626 } | 625 } |
| 627 } | 626 } |
| 628 | 627 |
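As context for the ConvertToDoubleStub calls above: the stub leaves the IEEE-754 double for an untagged integer in a register pair. A minimal C++ sketch of that conversion, assuming little-endian 32-bit word order as on MIPS32EL (the function name is illustrative, not V8 API):

```cpp
#include <cstdint>
#include <cstring>

// What ConvertToDoubleStub leaves in its register pair: the two 32-bit
// halves of the IEEE-754 double for an untagged integer. Assumes
// little-endian word order, as on MIPS32EL; the name is illustrative.
void IntToDoubleWords(int32_t value, uint32_t* lo, uint32_t* hi) {
  double d = static_cast<double>(value);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // Type-pun safely via memcpy.
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}
```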
| 629 | 628 |
| 630 void FloatingPointHelper::LoadOperands( | |
| 631 MacroAssembler* masm, | |
| 632 FloatingPointHelper::Destination destination, | |
| 633 Register heap_number_map, | |
| 634 Register scratch1, | |
| 635 Register scratch2, | |
| 636 Label* slow) { | |
| 637 | |
| 638 // Load right operand (a0) to f14 or a2/a3. |
| 639 LoadNumber(masm, destination, | |
| 640 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow); | |
| 641 | |
| 642 // Load left operand (a1) to f12 or a0/a1. |
| 643 LoadNumber(masm, destination, | |
| 644 a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow); | |
| 645 } | |
| 646 | |
| 647 | |
| 648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 629 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| 649 Destination destination, | 630 Destination destination, |
| 650 Register object, | 631 Register object, |
| 651 FPURegister dst, | 632 FPURegister dst, |
| 652 Register dst1, | 633 Register dst1, |
| 653 Register dst2, | 634 Register dst2, |
| 654 Register heap_number_map, | 635 Register heap_number_map, |
| 655 Register scratch1, | 636 Register scratch1, |
| 656 Register scratch2, | 637 Register scratch2, |
| 657 Label* not_number) { | 638 Label* not_number) { |
| (...skipping 257 matching lines...) |
| 915 Register scratch2, | 896 Register scratch2, |
| 916 Register scratch3, | 897 Register scratch3, |
| 917 DoubleRegister double_scratch, | 898 DoubleRegister double_scratch, |
| 918 Label* not_int32) { | 899 Label* not_int32) { |
| 919 ASSERT(!dst.is(object)); | 900 ASSERT(!dst.is(object)); |
| 920 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 901 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
| 921 ASSERT(!scratch1.is(scratch2) && | 902 ASSERT(!scratch1.is(scratch2) && |
| 922 !scratch1.is(scratch3) && | 903 !scratch1.is(scratch3) && |
| 923 !scratch2.is(scratch3)); | 904 !scratch2.is(scratch3)); |
| 924 | 905 |
| 925 Label done; | 906 Label done, maybe_undefined; |
| 926 | 907 |
| 927 __ UntagAndJumpIfSmi(dst, object, &done); | 908 __ UntagAndJumpIfSmi(dst, object, &done); |
| 928 | 909 |
| 929 __ AssertRootValue(heap_number_map, | 910 __ AssertRootValue(heap_number_map, |
| 930 Heap::kHeapNumberMapRootIndex, | 911 Heap::kHeapNumberMapRootIndex, |
| 931 "HeapNumberMap register clobbered."); | 912 "HeapNumberMap register clobbered."); |
| 932 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 913 |
| 914 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
| 933 | 915 |
| 934 // Object is a heap number. | 916 // Object is a heap number. |
| 935 // Convert the floating point value to a 32-bit integer. | 917 // Convert the floating point value to a 32-bit integer. |
| 936 if (CpuFeatures::IsSupported(FPU)) { | 918 if (CpuFeatures::IsSupported(FPU)) { |
| 937 CpuFeatures::Scope scope(FPU); | 919 CpuFeatures::Scope scope(FPU); |
| 938 // Load the double value. | 920 // Load the double value. |
| 939 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | 921 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 940 | 922 |
| 941 FPURegister single_scratch = double_scratch.low(); | 923 FPURegister single_scratch = double_scratch.low(); |
| 942 Register except_flag = scratch2; | 924 Register except_flag = scratch2; |
| (...skipping 33 matching lines...) |
| 976 __ sllv(scratch2, scratch2, scratch3); | 958 __ sllv(scratch2, scratch2, scratch3); |
| 977 __ Or(dst, dst, scratch2); | 959 __ Or(dst, dst, scratch2); |
| 978 // Set the sign. | 960 // Set the sign. |
| 979 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 961 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 980 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 962 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 981 Label skip_sub; | 963 Label skip_sub; |
| 982 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg)); | 964 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg)); |
| 983 __ Subu(dst, zero_reg, dst); | 965 __ Subu(dst, zero_reg, dst); |
| 984 __ bind(&skip_sub); | 966 __ bind(&skip_sub); |
| 985 } | 967 } |
| 968 __ Branch(&done); |
| 969 |
| 970 __ bind(&maybe_undefined); |
| 971 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 972 __ Branch(not_int32, ne, object, Operand(at)); |
| 973 // |undefined| is truncated to 0. |
| 974 __ li(dst, Operand(Smi::FromInt(0))); |
| 975 // Fall through. |
| 986 | 976 |
| 987 __ bind(&done); | 977 __ bind(&done); |
| 988 } | 978 } |
| 989 | 979 |
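For reference, the conversion LoadNumberAsInt32 performs follows ES5 ToInt32 (truncate toward zero, wrap modulo 2^32), with the new |maybe_undefined| path mapping undefined to 0; the non-FPU branch above reconstructs the same integer manually from the exponent and mantissa words. A hedged C++ sketch of that mapping (names are illustrative, not V8 API):

```cpp
#include <cmath>
#include <cstdint>

// Illustrative sketch of ES5 ToInt32 truncation plus the undefined-to-0
// rule added by the |maybe_undefined| path; not V8 API.
int32_t TruncateToInt32(double value, bool is_undefined) {
  if (is_undefined) return 0;           // |undefined| is truncated to 0.
  if (!std::isfinite(value)) return 0;  // NaN and +/-Infinity map to 0.
  double t = std::trunc(value);         // Round toward zero.
  double m = std::fmod(t, 4294967296.0);
  if (m < 0) m += 4294967296.0;         // Wrap into [0, 2^32).
  // Reinterpret the unsigned 32-bit value as signed (two's complement).
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
```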
| 990 | 980 |
| 991 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | 981 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
| 992 Register src1, | 982 Register src1, |
| 993 Register src2, | 983 Register src2, |
| 994 Register dst, | 984 Register dst, |
| 995 Register scratch, | 985 Register scratch, |
| (...skipping 180 matching lines...) |
| 1176 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | 1166 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
| 1177 __ Ret(); | 1167 __ Ret(); |
| 1178 } | 1168 } |
| 1179 | 1169 |
| 1180 | 1170 |
| 1181 // Handle the case where the lhs and rhs are the same object. | 1171 // Handle the case where the lhs and rhs are the same object. |
| 1182 // Equality is almost reflexive (everything but NaN), so this is a test | 1172 // Equality is almost reflexive (everything but NaN), so this is a test |
| 1183 // for "identity and not NaN". | 1173 // for "identity and not NaN". |
| 1184 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 1174 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 1185 Label* slow, | 1175 Label* slow, |
| 1186 Condition cc, | 1176 Condition cc) { |
| 1187 bool never_nan_nan) { | |
| 1188 Label not_identical; | 1177 Label not_identical; |
| 1189 Label heap_number, return_equal; | 1178 Label heap_number, return_equal; |
| 1190 Register exp_mask_reg = t5; | 1179 Register exp_mask_reg = t5; |
| 1191 | 1180 |
| 1192 __ Branch(¬_identical, ne, a0, Operand(a1)); | 1181 __ Branch(¬_identical, ne, a0, Operand(a1)); |
| 1193 | 1182 |
| 1194 // The two objects are identical. If we know that one of them isn't NaN then | 1183 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); |
| 1195 // we now know they test equal. | |
| 1196 if (cc != eq || !never_nan_nan) { | |
| 1197 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); | |
| 1198 | 1184 |
| 1199 // Test for NaN. Sadly, we can't just compare to factory->nan_value(), | 1185 // Test for NaN. Sadly, we can't just compare to factory->nan_value(), |
| 1200 // so we do the second best thing - test it ourselves. | 1186 // so we do the second best thing - test it ourselves. |
| 1201 // They are both equal and they are not both Smis so both of them are not | 1187 // They are both equal and they are not both Smis so both of them are not |
| 1202 // Smis. If it's not a heap number, then return equal. | 1188 // Smis. If it's not a heap number, then return equal. |
| 1203 if (cc == less || cc == greater) { | 1189 if (cc == less || cc == greater) { |
| 1204 __ GetObjectType(a0, t4, t4); | 1190 __ GetObjectType(a0, t4, t4); |
| 1205 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1191 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1206 } else { | 1192 } else { |
| 1207 __ GetObjectType(a0, t4, t4); | 1193 __ GetObjectType(a0, t4, t4); |
| 1208 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); | 1194 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); |
| 1209 // Comparing JS objects with <=, >= is complicated. | 1195 // Comparing JS objects with <=, >= is complicated. |
| 1210 if (cc != eq) { | 1196 if (cc != eq) { |
| 1211 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1197 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1212 // Normally here we fall through to return_equal, but undefined is | 1198 // Normally here we fall through to return_equal, but undefined is |
| 1213 // special: (undefined == undefined) == true, but | 1199 // special: (undefined == undefined) == true, but |
| 1214 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 1200 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 1215 if (cc == less_equal || cc == greater_equal) { | 1201 if (cc == less_equal || cc == greater_equal) { |
| 1216 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); | 1202 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); |
| 1217 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 1203 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); |
| 1218 __ Branch(&return_equal, ne, a0, Operand(t2)); | 1204 __ Branch(&return_equal, ne, a0, Operand(t2)); |
| 1219 if (cc == le) { | 1205 if (cc == le) { |
| 1220 // undefined <= undefined should fail. | 1206 // undefined <= undefined should fail. |
| 1221 __ li(v0, Operand(GREATER)); | 1207 __ li(v0, Operand(GREATER)); |
| 1222 } else { | 1208 } else { |
| 1223 // undefined >= undefined should fail. | 1209 // undefined >= undefined should fail. |
| 1224 __ li(v0, Operand(LESS)); | 1210 __ li(v0, Operand(LESS)); |
| 1225 } | |
| 1226 __ Ret(); | |
| 1227 } | 1211 } |
| 1212 __ Ret(); |
| 1228 } | 1213 } |
| 1229 } | 1214 } |
| 1230 } | 1215 } |
| 1231 | 1216 |
| 1232 __ bind(&return_equal); | 1217 __ bind(&return_equal); |
| 1233 | 1218 |
| 1234 if (cc == less) { | 1219 if (cc == less) { |
| 1235 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | 1220 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
| 1236 } else if (cc == greater) { | 1221 } else if (cc == greater) { |
| 1237 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | 1222 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
| 1238 } else { | 1223 } else { |
| 1239 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. | 1224 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
| 1240 } | 1225 } |
| 1241 __ Ret(); | 1226 __ Ret(); |
| 1242 | 1227 |
| 1243 if (cc != eq || !never_nan_nan) { | 1228 // For less and greater we don't have to check for NaN since the result of |
| 1244 // For less and greater we don't have to check for NaN since the result of | 1229 // x < x is false regardless. For the others here is some code to check |
| 1245 // x < x is false regardless. For the others here is some code to check | 1230 // for NaN. |
| 1246 // for NaN. | 1231 if (cc != lt && cc != gt) { |
| 1247 if (cc != lt && cc != gt) { | 1232 __ bind(&heap_number); |
| 1248 __ bind(&heap_number); | 1233 // It is a heap number, so return non-equal if it's NaN and equal if it's |
| 1249 // It is a heap number, so return non-equal if it's NaN and equal if it's | 1234 // not NaN. |
| 1250 // not NaN. | |
| 1251 | 1235 |
| 1252 // The representation of NaN values has all exponent bits (52..62) set, | 1236 // The representation of NaN values has all exponent bits (52..62) set, |
| 1253 // and not all mantissa bits (0..51) clear. | 1237 // and not all mantissa bits (0..51) clear. |
| 1254 // Read top bits of double representation (second word of value). | 1238 // Read top bits of double representation (second word of value). |
| 1255 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 1239 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 1256 // Test that exponent bits are all set. | 1240 // Test that exponent bits are all set. |
| 1257 __ And(t3, t2, Operand(exp_mask_reg)); | 1241 __ And(t3, t2, Operand(exp_mask_reg)); |
| 1258 // If all bits not set (ne cond), then not a NaN, objects are equal. | 1242 // If all bits not set (ne cond), then not a NaN, objects are equal. |
| 1259 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); | 1243 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); |
| 1260 | 1244 |
| 1261 // Shift out flag and all exponent bits, retaining only mantissa. | 1245 // Shift out flag and all exponent bits, retaining only mantissa. |
| 1262 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); | 1246 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); |
| 1263 // Or with all low-bits of mantissa. | 1247 // Or with all low-bits of mantissa. |
| 1264 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 1248 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 1265 __ Or(v0, t3, Operand(t2)); | 1249 __ Or(v0, t3, Operand(t2)); |
| 1266 // For equal we already have the right value in v0: Return zero (equal) | 1250 // For equal we already have the right value in v0: Return zero (equal) |
| 1267 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 1251 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 1268 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 1252 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
| 1269 // value if it's a NaN. | 1253 // value if it's a NaN. |
| 1270 if (cc != eq) { | 1254 if (cc != eq) { |
| 1271 // All-zero means Infinity means equal. | 1255 // All-zero means Infinity means equal. |
| 1272 __ Ret(eq, v0, Operand(zero_reg)); | 1256 __ Ret(eq, v0, Operand(zero_reg)); |
| 1273 if (cc == le) { | 1257 if (cc == le) { |
| 1274 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. | 1258 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. |
| 1275 } else { | 1259 } else { |
| 1276 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. | 1260 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. |
| 1277 } | |
| 1278 } | 1261 } |
| 1279 __ Ret(); | |
| 1280 } | 1262 } |
| 1281 // No fall through here. | 1263 __ Ret(); |
| 1282 } | 1264 } |
| 1265 // No fall through here. |
| 1283 | 1266 |
| 1284 __ bind(¬_identical); | 1267 __ bind(¬_identical); |
| 1285 } | 1268 } |
| 1286 | 1269 |
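The exponent/mantissa test above is the standard IEEE-754 NaN check, and the GREATER/LESS constants are chosen so that a NaN comparison fails whichever condition is being evaluated. A small C++ sketch of both pieces (constants and names are illustrative):

```cpp
#include <cstdint>

// IEEE-754 double: NaN has all 11 exponent bits (52..62) set and a
// non-zero mantissa (bits 0..51).
bool IsNaNBits(uint64_t bits) {
  const uint64_t kExpMask  = 0x7FF0000000000000ULL;
  const uint64_t kMantMask = 0x000FFFFFFFFFFFFFULL;
  return (bits & kExpMask) == kExpMask && (bits & kMantMask) != 0;
}

// For identical operands: EQUAL (0) unless NaN. A NaN must fail the
// condition, so <= gets GREATER (+1) and >= gets LESS (-1), mirroring the
// values the stub loads into v0.
int CompareIdenticalResult(uint64_t bits, bool cc_is_less_equal) {
  if (!IsNaNBits(bits)) return 0;
  return cc_is_less_equal ? +1 : -1;
}
```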
| 1287 | 1270 |
| 1288 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 1271 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 1289 Register lhs, | 1272 Register lhs, |
| 1290 Register rhs, | 1273 Register rhs, |
| 1291 Label* both_loaded_as_doubles, | 1274 Label* both_loaded_as_doubles, |
| 1292 Label* slow, | 1275 Label* slow, |
| (...skipping 452 matching lines...) |
| 1745 // Generate code to lookup number in the number string cache. | 1728 // Generate code to lookup number in the number string cache. |
| 1746 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime); | 1729 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime); |
| 1747 __ DropAndRet(1); | 1730 __ DropAndRet(1); |
| 1748 | 1731 |
| 1749 __ bind(&runtime); | 1732 __ bind(&runtime); |
| 1750 // Handle number to string in the runtime system if not found in the cache. | 1733 // Handle number to string in the runtime system if not found in the cache. |
| 1751 __ TailCallRuntime(Runtime::kNumberToString, 1, 1); | 1734 __ TailCallRuntime(Runtime::kNumberToString, 1, 1); |
| 1752 } | 1735 } |
| 1753 | 1736 |
| 1754 | 1737 |
| 1755 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared. | 1738 static void ICCompareStub_CheckInputType(MacroAssembler* masm, |
| 1756 // On exit, v0 is 0, positive, or negative (smi) to indicate the result | 1739 Register input, |
| 1757 // of the comparison. | 1740 Register scratch, |
| 1758 void CompareStub::Generate(MacroAssembler* masm) { | 1741 CompareIC::State expected, |
| 1742 Label* fail) { |
| 1743 Label ok; |
| 1744 if (expected == CompareIC::SMI) { |
| 1745 __ JumpIfNotSmi(input, fail); |
| 1746 } else if (expected == CompareIC::HEAP_NUMBER) { |
| 1747 __ JumpIfSmi(input, &ok); |
| 1748 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, |
| 1749 DONT_DO_SMI_CHECK); |
| 1750 } |
| 1751 // We could be strict about symbol/string here, but as long as |
| 1752 // hydrogen doesn't care, the stub doesn't have to care either. |
| 1753 __ bind(&ok); |
| 1754 } |
| 1755 |
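ICCompareStub_CheckInputType falls through only when an operand still matches the state recorded by the CompareIC; note the HEAP_NUMBER state also admits smis, since a smi is a valid numeric input. A simplified sketch of that contract (the enum and helper are illustrative, not the real CompareIC interface):

```cpp
// Simplified sketch of the input check; illustrative enum, not the real
// CompareIC interface.
enum class CompareICState { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };

bool InputMatches(CompareICState expected, bool is_smi, bool is_heap_number) {
  switch (expected) {
    case CompareICState::SMI:
      return is_smi;                      // JumpIfNotSmi(input, fail).
    case CompareICState::HEAP_NUMBER:
      return is_smi || is_heap_number;    // Smis are valid number inputs.
    default:
      return true;                        // No check for other states.
  }
}
```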
| 1756 |
| 1757 // On entry a1 (lhs) and a0 (rhs) are the values to be compared. |
| 1758 // On exit v0 is 0, positive or negative to indicate the result of |
| 1759 // the comparison. |
| 1760 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| 1761 Register lhs = a1; |
| 1762 Register rhs = a0; |
| 1763 Condition cc = GetCondition(); |
| 1764 |
| 1765 Label miss; |
| 1766 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss); |
| 1767 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss); |
| 1768 |
| 1759 Label slow; // Call builtin. | 1769 Label slow; // Call builtin. |
| 1760 Label not_smis, both_loaded_as_doubles; | 1770 Label not_smis, both_loaded_as_doubles; |
| 1761 | 1771 |
| 1762 | 1772 Label not_two_smis, smi_done; |
| 1763 if (include_smi_compare_) { | 1773 __ Or(a2, a1, a0); |
| 1764 Label not_two_smis, smi_done; | 1774 __ JumpIfNotSmi(a2, ¬_two_smis); |
| 1765 __ Or(a2, a1, a0); | 1775 __ sra(a1, a1, 1); |
| 1766 __ JumpIfNotSmi(a2, ¬_two_smis); | 1776 __ sra(a0, a0, 1); |
| 1767 __ sra(a1, a1, 1); | 1777 __ Ret(USE_DELAY_SLOT); |
| 1768 __ sra(a0, a0, 1); | 1778 __ subu(v0, a1, a0); |
| 1769 __ Ret(USE_DELAY_SLOT); | 1779 __ bind(¬_two_smis); |
| 1770 __ subu(v0, a1, a0); | |
| 1771 __ bind(¬_two_smis); | |
| 1772 } else if (FLAG_debug_code) { | |
| 1773 __ Or(a2, a1, a0); | |
| 1774 __ And(a2, a2, kSmiTagMask); | |
| 1775 __ Assert(ne, "CompareStub: unexpected smi operands.", | |
| 1776 a2, Operand(zero_reg)); | |
| 1777 } | |
| 1778 | |
| 1779 | 1780 |
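The smi fast path above untags both operands with an arithmetic shift and returns their difference: each untagged value fits in 31 bits, so the 32-bit subtraction cannot overflow and its sign is exactly the comparison result. A C++ sketch, assuming 32-bit smis with a low tag bit of 0:

```cpp
#include <cstdint>

// Tagged smi: 31-bit signed payload, low tag bit 0. Untag with an
// arithmetic shift; the difference of two 31-bit values fits in 32 bits,
// so no overflow check is needed. Result is <0, 0 or >0 as lhs <=> rhs.
int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  return (lhs_tagged >> 1) - (rhs_tagged >> 1);
}
```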
| 1780 // NOTICE! This code is only reached after a smi-fast-case check, so | 1781 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 1781 // it is certain that at least one operand isn't a smi. | 1782 // it is certain that at least one operand isn't a smi. |
| 1782 | 1783 |
| 1783 // Handle the case where the objects are identical. Either returns the answer | 1784 // Handle the case where the objects are identical. Either returns the answer |
| 1784 // or goes to slow. Only falls through if the objects were not identical. | 1785 // or goes to slow. Only falls through if the objects were not identical. |
| 1785 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 1786 EmitIdenticalObjectComparison(masm, &slow, cc); |
| 1786 | 1787 |
| 1787 // If either is a Smi (we know that not both are), then they can only | 1788 // If either is a Smi (we know that not both are), then they can only |
| 1788 // be strictly equal if the other is a HeapNumber. | 1789 // be strictly equal if the other is a HeapNumber. |
| 1789 STATIC_ASSERT(kSmiTag == 0); | 1790 STATIC_ASSERT(kSmiTag == 0); |
| 1790 ASSERT_EQ(0, Smi::FromInt(0)); | 1791 ASSERT_EQ(0, Smi::FromInt(0)); |
| 1791 __ And(t2, lhs_, Operand(rhs_)); | 1792 __ And(t2, lhs, Operand(rhs)); |
| 1792 __ JumpIfNotSmi(t2, ¬_smis, t0); | 1793 __ JumpIfNotSmi(t2, ¬_smis, t0); |
| 1793 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 1794 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| 1794 // 1) Return the answer. | 1795 // 1) Return the answer. |
| 1795 // 2) Go to slow. | 1796 // 2) Go to slow. |
| 1796 // 3) Fall through to both_loaded_as_doubles. | 1797 // 3) Fall through to both_loaded_as_doubles. |
| 1797 // 4) Jump to rhs_not_nan. | 1798 // 4) Jump to rhs_not_nan. |
| 1798 // In cases 3 and 4 we have found out we were dealing with a number-number | 1799 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1799 // comparison and the numbers have been loaded into f12 and f14 as doubles, | 1800 // comparison and the numbers have been loaded into f12 and f14 as doubles, |
| 1800 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. | 1801 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
| 1801 EmitSmiNonsmiComparison(masm, lhs_, rhs_, | 1802 EmitSmiNonsmiComparison(masm, lhs, rhs, |
| 1802 &both_loaded_as_doubles, &slow, strict_); | 1803 &both_loaded_as_doubles, &slow, strict()); |
| 1803 | 1804 |
| 1804 __ bind(&both_loaded_as_doubles); | 1805 __ bind(&both_loaded_as_doubles); |
| 1805 // f12, f14 are the double representations of the left hand side | 1806 // f12, f14 are the double representations of the left hand side |
| 1806 // and the right hand side if we have FPU. Otherwise a2, a3 represent | 1807 // and the right hand side if we have FPU. Otherwise a2, a3 represent |
| 1807 // left hand side and a0, a1 represent right hand side. | 1808 // left hand side and a0, a1 represent right hand side. |
| 1808 | 1809 |
| 1809 Isolate* isolate = masm->isolate(); | 1810 Isolate* isolate = masm->isolate(); |
| 1810 if (CpuFeatures::IsSupported(FPU)) { | 1811 if (CpuFeatures::IsSupported(FPU)) { |
| 1811 CpuFeatures::Scope scope(FPU); | 1812 CpuFeatures::Scope scope(FPU); |
| 1812 Label nan; | 1813 Label nan; |
| (...skipping 15 matching lines...) |
| 1828 // Check if EQUAL condition is satisfied. If true, move conditionally | 1829 // Check if EQUAL condition is satisfied. If true, move conditionally |
| 1829 // result to v0. | 1830 // result to v0. |
| 1830 __ c(EQ, D, f12, f14); | 1831 __ c(EQ, D, f12, f14); |
| 1831 __ Movt(v0, t2); | 1832 __ Movt(v0, t2); |
| 1832 | 1833 |
| 1833 __ Ret(); | 1834 __ Ret(); |
| 1834 | 1835 |
| 1835 __ bind(&nan); | 1836 __ bind(&nan); |
| 1836 // NaN comparisons always fail. | 1837 // NaN comparisons always fail. |
| 1837 // Load whatever we need in v0 to make the comparison fail. | 1838 // Load whatever we need in v0 to make the comparison fail. |
| 1838 if (cc_ == lt || cc_ == le) { | 1839 if (cc == lt || cc == le) { |
| 1839 __ li(v0, Operand(GREATER)); | 1840 __ li(v0, Operand(GREATER)); |
| 1840 } else { | 1841 } else { |
| 1841 __ li(v0, Operand(LESS)); | 1842 __ li(v0, Operand(LESS)); |
| 1842 } | 1843 } |
| 1843 __ Ret(); | 1844 __ Ret(); |
| 1844 } else { | 1845 } else { |
| 1845 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1846 // Checks for NaN in the doubles we have loaded. Can return the answer or |
| 1846 // fall through if neither is a NaN. Also binds rhs_not_nan. | 1847 // fall through if neither is a NaN. Also binds rhs_not_nan. |
| 1847 EmitNanCheck(masm, cc_); | 1848 EmitNanCheck(masm, cc); |
| 1848 | 1849 |
| 1849 // Compares two doubles that are not NaNs. Returns the answer. | 1850 // Compares two doubles that are not NaNs. Returns the answer. |
| 1850 // Never falls through. | 1851 // Never falls through. |
| 1851 EmitTwoNonNanDoubleComparison(masm, cc_); | 1852 EmitTwoNonNanDoubleComparison(masm, cc); |
| 1852 } | 1853 } |
| 1853 | 1854 |
| 1854 __ bind(¬_smis); | 1855 __ bind(¬_smis); |
| 1855 // At this point we know we are dealing with two different objects, | 1856 // At this point we know we are dealing with two different objects, |
| 1856 // and neither of them is a Smi. The objects are in lhs_ and rhs_. | 1857 // and neither of them is a Smi. The objects are in lhs_ and rhs_. |
| 1857 if (strict_) { | 1858 if (strict()) { |
| 1858 // This returns non-equal for some object types, or falls through if it | 1859 // This returns non-equal for some object types, or falls through if it |
| 1859 // was not lucky. | 1860 // was not lucky. |
| 1860 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); | 1861 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
| 1861 } | 1862 } |
| 1862 | 1863 |
| 1863 Label check_for_symbols; | 1864 Label check_for_symbols; |
| 1864 Label flat_string_check; | 1865 Label flat_string_check; |
| 1865 // Check for heap-number-heap-number comparison. Can jump to slow case, | 1866 // Check for heap-number-heap-number comparison. Can jump to slow case, |
| 1866 // or load both doubles and jump to the code that handles | 1867 // or load both doubles and jump to the code that handles |
| 1867 // that case. If the inputs are not doubles then jumps to check_for_symbols. | 1868 // that case. If the inputs are not doubles then jumps to check_for_symbols. |
| 1868 // In this case a2 will contain the type of lhs_. | 1869 // In this case a2 will contain the type of lhs_. |
| 1869 EmitCheckForTwoHeapNumbers(masm, | 1870 EmitCheckForTwoHeapNumbers(masm, |
| 1870 lhs_, | 1871 lhs, |
| 1871 rhs_, | 1872 rhs, |
| 1872 &both_loaded_as_doubles, | 1873 &both_loaded_as_doubles, |
| 1873 &check_for_symbols, | 1874 &check_for_symbols, |
| 1874 &flat_string_check); | 1875 &flat_string_check); |
| 1875 | 1876 |
| 1876 __ bind(&check_for_symbols); | 1877 __ bind(&check_for_symbols); |
| 1877 if (cc_ == eq && !strict_) { | 1878 if (cc == eq && !strict()) { |
| 1878 // Returns an answer for two symbols or two detectable objects. | 1879 // Returns an answer for two symbols or two detectable objects. |
| 1879 // Otherwise jumps to string case or not both strings case. | 1880 // Otherwise jumps to string case or not both strings case. |
| 1880 // Assumes that a2 is the type of lhs_ on entry. | 1881 // Assumes that a2 is the type of lhs_ on entry. |
| 1881 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); | 1882 EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow); |
| 1882 } | 1883 } |
| 1883 | 1884 |
| 1884 // Check for both being sequential ASCII strings, and inline if that is the | 1885 // Check for both being sequential ASCII strings, and inline if that is the |
| 1885 // case. | 1886 // case. |
| 1886 __ bind(&flat_string_check); | 1887 __ bind(&flat_string_check); |
| 1887 | 1888 |
| 1888 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow); | 1889 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); |
| 1889 | 1890 |
| 1890 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3); | 1891 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3); |
| 1891 if (cc_ == eq) { | 1892 if (cc == eq) { |
| 1892 StringCompareStub::GenerateFlatAsciiStringEquals(masm, | 1893 StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
| 1893 lhs_, | 1894 lhs, |
| 1894 rhs_, | 1895 rhs, |
| 1895 a2, | 1896 a2, |
| 1896 a3, | 1897 a3, |
| 1897 t0); | 1898 t0); |
| 1898 } else { | 1899 } else { |
| 1899 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | 1900 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
| 1900 lhs_, | 1901 lhs, |
| 1901 rhs_, | 1902 rhs, |
| 1902 a2, | 1903 a2, |
| 1903 a3, | 1904 a3, |
| 1904 t0, | 1905 t0, |
| 1905 t1); | 1906 t1); |
| 1906 } | 1907 } |
| 1907 // Never falls through to here. | 1908 // Never falls through to here. |
| 1908 | 1909 |
| 1909 __ bind(&slow); | 1910 __ bind(&slow); |
| 1910 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, | 1911 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, |
| 1911 // a1 (rhs) second. | 1912 // a1 (rhs) second. |
| 1912 __ Push(lhs_, rhs_); | 1913 __ Push(lhs, rhs); |
| 1913 // Figure out which native to call and setup the arguments. | 1914 // Figure out which native to call and setup the arguments. |
| 1914 Builtins::JavaScript native; | 1915 Builtins::JavaScript native; |
| 1915 if (cc_ == eq) { | 1916 if (cc == eq) { |
| 1916 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 1917 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1917 } else { | 1918 } else { |
| 1918 native = Builtins::COMPARE; | 1919 native = Builtins::COMPARE; |
| 1919 int ncr; // NaN compare result. | 1920 int ncr; // NaN compare result. |
| 1920 if (cc_ == lt || cc_ == le) { | 1921 if (cc == lt || cc == le) { |
| 1921 ncr = GREATER; | 1922 ncr = GREATER; |
| 1922 } else { | 1923 } else { |
| 1923 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases. | 1924 ASSERT(cc == gt || cc == ge); // Remaining cases. |
| 1924 ncr = LESS; | 1925 ncr = LESS; |
| 1925 } | 1926 } |
| 1926 __ li(a0, Operand(Smi::FromInt(ncr))); | 1927 __ li(a0, Operand(Smi::FromInt(ncr))); |
| 1927 __ push(a0); | 1928 __ push(a0); |
| 1928 } | 1929 } |
| 1929 | 1930 |
| 1930 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1931 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1931 // tagged as a small integer. | 1932 // tagged as a small integer. |
| 1932 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1933 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 1934 |
| 1935 __ bind(&miss); |
| 1936 GenerateMiss(masm); |
| 1933 } | 1937 } |
| 1934 | 1938 |
| 1935 | 1939 |
| 1936 // The stub expects its argument in the tos_ register and returns its result in | 1940 // The stub expects its argument in the tos_ register and returns its result in |
| 1937 // it, too: zero for false, and a non-zero value for true. | 1941 // it, too: zero for false, and a non-zero value for true. |
| 1938 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1942 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 1939 // This stub uses FPU instructions. | 1943 // This stub uses FPU instructions. |
| 1940 CpuFeatures::Scope scope(FPU); | 1944 CpuFeatures::Scope scope(FPU); |
| 1941 | 1945 |
| 1942 Label patch; | 1946 Label patch; |
| (...skipping 420 matching lines...) |
| 2363 break; | 2367 break; |
| 2364 case Token::BIT_NOT: | 2368 case Token::BIT_NOT: |
| 2365 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 2369 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 2366 break; | 2370 break; |
| 2367 default: | 2371 default: |
| 2368 UNREACHABLE(); | 2372 UNREACHABLE(); |
| 2369 } | 2373 } |
| 2370 } | 2374 } |
| 2371 | 2375 |
| 2372 | 2376 |
| 2377 void BinaryOpStub::Initialize() { |
| 2378 platform_specific_bit_ = CpuFeatures::IsSupported(FPU); |
| 2379 } |
| 2380 |
| 2381 |
| 2373 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 2382 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 2374 Label get_result; | 2383 Label get_result; |
| 2375 | 2384 |
| 2376 __ Push(a1, a0); | 2385 __ Push(a1, a0); |
| 2377 | 2386 |
| 2378 __ li(a2, Operand(Smi::FromInt(MinorKey()))); | 2387 __ li(a2, Operand(Smi::FromInt(MinorKey()))); |
| 2379 __ li(a1, Operand(Smi::FromInt(op_))); | 2388 __ push(a2); |
| 2380 __ li(a0, Operand(Smi::FromInt(operands_type_))); | |
| 2381 __ Push(a2, a1, a0); | |
| 2382 | 2389 |
| 2383 __ TailCallExternalReference( | 2390 __ TailCallExternalReference( |
| 2384 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | 2391 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
| 2385 masm->isolate()), | 2392 masm->isolate()), |
| 2386 5, | 2393 3, |
| 2387 1); | 2394 1); |
| 2388 } | 2395 } |
| 2389 | 2396 |
| 2390 | 2397 |
| 2391 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 2398 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
| 2392 MacroAssembler* masm) { | 2399 MacroAssembler* masm) { |
| 2393 UNIMPLEMENTED(); | 2400 UNIMPLEMENTED(); |
| 2394 } | 2401 } |
| 2395 | 2402 |
| 2396 | 2403 |
| 2397 void BinaryOpStub::Generate(MacroAssembler* masm) { | 2404 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
| 2398 // Explicitly allow generation of nested stubs. It is safe here because | 2405 Token::Value op) { |
| 2399 // generation code does not use any raw pointers. | |
| 2400 AllowStubCallsScope allow_stub_calls(masm, true); | |
| 2401 switch (operands_type_) { | |
| 2402 case BinaryOpIC::UNINITIALIZED: | |
| 2403 GenerateTypeTransition(masm); | |
| 2404 break; | |
| 2405 case BinaryOpIC::SMI: | |
| 2406 GenerateSmiStub(masm); | |
| 2407 break; | |
| 2408 case BinaryOpIC::INT32: | |
| 2409 GenerateInt32Stub(masm); | |
| 2410 break; | |
| 2411 case BinaryOpIC::HEAP_NUMBER: | |
| 2412 GenerateHeapNumberStub(masm); | |
| 2413 break; | |
| 2414 case BinaryOpIC::ODDBALL: | |
| 2415 GenerateOddballStub(masm); | |
| 2416 break; | |
| 2417 case BinaryOpIC::BOTH_STRING: | |
| 2418 GenerateBothStringStub(masm); | |
| 2419 break; | |
| 2420 case BinaryOpIC::STRING: | |
| 2421 GenerateStringStub(masm); | |
| 2422 break; | |
| 2423 case BinaryOpIC::GENERIC: | |
| 2424 GenerateGeneric(masm); | |
| 2425 break; | |
| 2426 default: | |
| 2427 UNREACHABLE(); | |
| 2428 } | |
| 2429 } | |
| 2430 | |
| 2431 | |
| 2432 void BinaryOpStub::PrintName(StringStream* stream) { | |
| 2433 const char* op_name = Token::Name(op_); | |
| 2434 const char* overwrite_name; | |
| 2435 switch (mode_) { | |
| 2436 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | |
| 2437 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | |
| 2438 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | |
| 2439 default: overwrite_name = "UnknownOverwrite"; break; | |
| 2440 } | |
| 2441 stream->Add("BinaryOpStub_%s_%s_%s", | |
| 2442 op_name, | |
| 2443 overwrite_name, | |
| 2444 BinaryOpIC::GetName(operands_type_)); | |
| 2445 } | |
| 2446 | |
| 2447 | |
| 2448 | |
| 2449 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { | |
| 2450 Register left = a1; | 2406 Register left = a1; |
| 2451 Register right = a0; | 2407 Register right = a0; |
| 2452 | 2408 |
| 2453 Register scratch1 = t0; | 2409 Register scratch1 = t0; |
| 2454 Register scratch2 = t1; | 2410 Register scratch2 = t1; |
| 2455 | 2411 |
| 2456 ASSERT(right.is(a0)); | 2412 ASSERT(right.is(a0)); |
| 2457 STATIC_ASSERT(kSmiTag == 0); | 2413 STATIC_ASSERT(kSmiTag == 0); |
| 2458 | 2414 |
| 2459 Label not_smi_result; | 2415 Label not_smi_result; |
| 2460 switch (op_) { | 2416 switch (op) { |
| 2461 case Token::ADD: | 2417 case Token::ADD: |
| 2462 __ AdduAndCheckForOverflow(v0, left, right, scratch1); | 2418 __ AdduAndCheckForOverflow(v0, left, right, scratch1); |
| 2463 __ RetOnNoOverflow(scratch1); | 2419 __ RetOnNoOverflow(scratch1); |
| 2464 // No need to revert anything - right and left are intact. | 2420 // No need to revert anything - right and left are intact. |
| 2465 break; | 2421 break; |
| 2466 case Token::SUB: | 2422 case Token::SUB: |
| 2467 __ SubuAndCheckForOverflow(v0, left, right, scratch1); | 2423 __ SubuAndCheckForOverflow(v0, left, right, scratch1); |
| 2468 __ RetOnNoOverflow(scratch1); | 2424 __ RetOnNoOverflow(scratch1); |
| 2469 // No need to revert anything - right and left are intact. | 2425 // No need to revert anything - right and left are intact. |
| 2470 break; | 2426 break; |
| (...skipping 122 matching lines...) |
| 2593 __ SmiTag(v0, scratch1); | 2549 __ SmiTag(v0, scratch1); |
| 2594 __ Ret(); | 2550 __ Ret(); |
| 2595 break; | 2551 break; |
| 2596 default: | 2552 default: |
| 2597 UNREACHABLE(); | 2553 UNREACHABLE(); |
| 2598 } | 2554 } |
| 2599 __ bind(¬_smi_result); | 2555 __ bind(¬_smi_result); |
| 2600 } | 2556 } |
| 2601 | 2557 |
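AdduAndCheckForOverflow and SubuAndCheckForOverflow leave an overflow indication in the scratch register so RetOnNoOverflow returns only when the tagged result is still a valid smi. A hedged C++ equivalent of the ADD case, using the GCC/Clang overflow builtin rather than the sign-bit test the macro assembler uses:

```cpp
#include <cstdint>

// Adding two tagged smis yields the tagged sum; the result is a valid smi
// exactly when the 32-bit signed addition did not overflow.
bool AddSmisChecked(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* out) {
  int32_t sum;
  if (__builtin_add_overflow(lhs_tagged, rhs_tagged, &sum)) {
    return false;  // Corresponds to falling through to not_smi_result.
  }
  *out = sum;
  return true;     // Corresponds to RetOnNoOverflow taking the return.
}
```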
| 2602 | 2558 |
| 2603 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2559 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 2604 bool smi_operands, | 2560 Register result, |
| 2605 Label* not_numbers, | 2561 Register heap_number_map, |
| 2606 Label* gc_required) { | 2562 Register scratch1, |
| 2563 Register scratch2, |
| 2564 Label* gc_required, |
| 2565 OverwriteMode mode); |
| 2566 |
| 2567 |
| 2568 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
| 2569 BinaryOpIC::TypeInfo left_type, |
| 2570 BinaryOpIC::TypeInfo right_type, |
| 2571 bool smi_operands, |
| 2572 Label* not_numbers, |
| 2573 Label* gc_required, |
| 2574 Label* miss, |
| 2575 Token::Value op, |
| 2576 OverwriteMode mode) { |
| 2607 Register left = a1; | 2577 Register left = a1; |
| 2608 Register right = a0; | 2578 Register right = a0; |
| 2609 Register scratch1 = t3; | 2579 Register scratch1 = t3; |
| 2610 Register scratch2 = t5; | 2580 Register scratch2 = t5; |
| 2611 Register scratch3 = t0; | 2581 Register scratch3 = t0; |
| 2612 | 2582 |
| 2613 ASSERT(smi_operands || (not_numbers != NULL)); | 2583 ASSERT(smi_operands || (not_numbers != NULL)); |
| 2614 if (smi_operands) { | 2584 if (smi_operands) { |
| 2615 __ AssertSmi(left); | 2585 __ AssertSmi(left); |
| 2616 __ AssertSmi(right); | 2586 __ AssertSmi(right); |
| 2617 } | 2587 } |
| 2588 if (left_type == BinaryOpIC::SMI) { |
| 2589 __ JumpIfNotSmi(left, miss); |
| 2590 } |
| 2591 if (right_type == BinaryOpIC::SMI) { |
| 2592 __ JumpIfNotSmi(right, miss); |
| 2593 } |
| 2618 | 2594 |
| 2619 Register heap_number_map = t2; | 2595 Register heap_number_map = t2; |
| 2620 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2596 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2621 | 2597 |
| 2622 switch (op_) { | 2598 switch (op) { |
| 2623 case Token::ADD: | 2599 case Token::ADD: |
| 2624 case Token::SUB: | 2600 case Token::SUB: |
| 2625 case Token::MUL: | 2601 case Token::MUL: |
| 2626 case Token::DIV: | 2602 case Token::DIV: |
| 2627 case Token::MOD: { | 2603 case Token::MOD: { |
| 2628 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 | 2604 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 |
| 2629 // depending on whether FPU is available or not. | 2605 // depending on whether FPU is available or not. |
| 2630 FloatingPointHelper::Destination destination = | 2606 FloatingPointHelper::Destination destination = |
| 2631 CpuFeatures::IsSupported(FPU) && | 2607 CpuFeatures::IsSupported(FPU) && |
| 2632 op_ != Token::MOD ? | 2608 op != Token::MOD ? |
| 2633 FloatingPointHelper::kFPURegisters : | 2609 FloatingPointHelper::kFPURegisters : |
| 2634 FloatingPointHelper::kCoreRegisters; | 2610 FloatingPointHelper::kCoreRegisters; |
| 2635 | 2611 |
| 2636 // Allocate new heap number for result. | 2612 // Allocate new heap number for result. |
| 2637 Register result = s0; | 2613 Register result = s0; |
| 2638 GenerateHeapResultAllocation( | 2614 BinaryOpStub_GenerateHeapResultAllocation( |
| 2639 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2615 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| 2640 | 2616 |
| 2641 // Load the operands. | 2617 // Load the operands. |
| 2642 if (smi_operands) { | 2618 if (smi_operands) { |
| 2643 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 2619 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
| 2644 } else { | 2620 } else { |
| 2645 FloatingPointHelper::LoadOperands(masm, | 2621 // Load right operand to f14 or a2/a3. |
| 2646 destination, | 2622 if (right_type == BinaryOpIC::INT32) { |
| 2647 heap_number_map, | 2623 FloatingPointHelper::LoadNumberAsInt32Double( |
| 2648 scratch1, | 2624 masm, right, destination, f14, f16, a2, a3, heap_number_map, |
| 2649 scratch2, | 2625 scratch1, scratch2, f2, miss); |
| 2650 not_numbers); | 2626 } else { |
| 2627 Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss |
| 2628 : not_numbers; |
| 2629 FloatingPointHelper::LoadNumber( |
| 2630 masm, destination, right, f14, a2, a3, heap_number_map, |
| 2631 scratch1, scratch2, fail); |
| 2632 } |
| 2633 // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it |
| 2634 // jumps to |miss|. |
| 2635 if (left_type == BinaryOpIC::INT32) { |
| 2636 FloatingPointHelper::LoadNumberAsInt32Double( |
| 2637 masm, left, destination, f12, f16, a0, a1, heap_number_map, |
| 2638 scratch1, scratch2, f2, miss); |
| 2639 } else { |
| 2640 Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss |
| 2641 : not_numbers; |
| 2642 FloatingPointHelper::LoadNumber( |
| 2643 masm, destination, left, f12, a0, a1, heap_number_map, |
| 2644 scratch1, scratch2, fail); |
| 2645 } |
| 2651 } | 2646 } |
| 2652 | 2647 |
| 2653 // Calculate the result. | 2648 // Calculate the result. |
| 2654 if (destination == FloatingPointHelper::kFPURegisters) { | 2649 if (destination == FloatingPointHelper::kFPURegisters) { |
| 2655 // Using FPU registers: | 2650 // Using FPU registers: |
| 2656 // f12: Left value. | 2651 // f12: Left value. |
| 2657 // f14: Right value. | 2652 // f14: Right value. |
| 2658 CpuFeatures::Scope scope(FPU); | 2653 CpuFeatures::Scope scope(FPU); |
| 2659 switch (op_) { | 2654 switch (op) { |
| 2660 case Token::ADD: | 2655 case Token::ADD: |
| 2661 __ add_d(f10, f12, f14); | 2656 __ add_d(f10, f12, f14); |
| 2662 break; | 2657 break; |
| 2663 case Token::SUB: | 2658 case Token::SUB: |
| 2664 __ sub_d(f10, f12, f14); | 2659 __ sub_d(f10, f12, f14); |
| 2665 break; | 2660 break; |
| 2666 case Token::MUL: | 2661 case Token::MUL: |
| 2667 __ mul_d(f10, f12, f14); | 2662 __ mul_d(f10, f12, f14); |
| 2668 break; | 2663 break; |
| 2669 case Token::DIV: | 2664 case Token::DIV: |
| 2670 __ div_d(f10, f12, f14); | 2665 __ div_d(f10, f12, f14); |
| 2671 break; | 2666 break; |
| 2672 default: | 2667 default: |
| 2673 UNREACHABLE(); | 2668 UNREACHABLE(); |
| 2674 } | 2669 } |
| 2675 | 2670 |
| 2676 // ARM uses a workaround here because of the unaligned HeapNumber | 2671 // ARM uses a workaround here because of the unaligned HeapNumber |
| 2677 // kValueOffset. On MIPS this workaround is built into sdc1 so | 2672 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 2678 // there's no point in generating even more instructions. | 2673 // there's no point in generating even more instructions. |
| 2679 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); | 2674 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 2680 __ Ret(USE_DELAY_SLOT); | 2675 __ Ret(USE_DELAY_SLOT); |
| 2681 __ mov(v0, result); | 2676 __ mov(v0, result); |
| 2682 } else { | 2677 } else { |
| 2683 // Call the C function to handle the double operation. | 2678 // Call the C function to handle the double operation. |
| 2684 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 2679 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
| 2685 op_, | 2680 op, |
| 2686 result, | 2681 result, |
| 2687 scratch1); | 2682 scratch1); |
| 2688 if (FLAG_debug_code) { | 2683 if (FLAG_debug_code) { |
| 2689 __ stop("Unreachable code."); | 2684 __ stop("Unreachable code."); |
| 2690 } | 2685 } |
| 2691 } | 2686 } |
| 2692 break; | 2687 break; |
| 2693 } | 2688 } |
| 2694 case Token::BIT_OR: | 2689 case Token::BIT_OR: |
| 2695 case Token::BIT_XOR: | 2690 case Token::BIT_XOR: |
| (...skipping 19 matching lines...) |
| 2715 right, | 2710 right, |
| 2716 a2, | 2711 a2, |
| 2717 heap_number_map, | 2712 heap_number_map, |
| 2718 scratch1, | 2713 scratch1, |
| 2719 scratch2, | 2714 scratch2, |
| 2720 scratch3, | 2715 scratch3, |
| 2721 f0, | 2716 f0, |
| 2722 not_numbers); | 2717 not_numbers); |
| 2723 } | 2718 } |
| 2724 Label result_not_a_smi; | 2719 Label result_not_a_smi; |
| 2725 switch (op_) { | 2720 switch (op) { |
| 2726 case Token::BIT_OR: | 2721 case Token::BIT_OR: |
| 2727 __ Or(a2, a3, Operand(a2)); | 2722 __ Or(a2, a3, Operand(a2)); |
| 2728 break; | 2723 break; |
| 2729 case Token::BIT_XOR: | 2724 case Token::BIT_XOR: |
| 2730 __ Xor(a2, a3, Operand(a2)); | 2725 __ Xor(a2, a3, Operand(a2)); |
| 2731 break; | 2726 break; |
| 2732 case Token::BIT_AND: | 2727 case Token::BIT_AND: |
| 2733 __ And(a2, a3, Operand(a2)); | 2728 __ And(a2, a3, Operand(a2)); |
| 2734 break; | 2729 break; |
| 2735 case Token::SAR: | 2730 case Token::SAR: |
| (...skipping 29 matching lines...) |
| 2765 __ SmiTag(v0, a2); | 2760 __ SmiTag(v0, a2); |
| 2766 __ Ret(); | 2761 __ Ret(); |
| 2767 | 2762 |
| 2768 // Allocate new heap number for result. | 2763 // Allocate new heap number for result. |
| 2769 __ bind(&result_not_a_smi); | 2764 __ bind(&result_not_a_smi); |
| 2770 Register result = t1; | 2765 Register result = t1; |
| 2771 if (smi_operands) { | 2766 if (smi_operands) { |
| 2772 __ AllocateHeapNumber( | 2767 __ AllocateHeapNumber( |
| 2773 result, scratch1, scratch2, heap_number_map, gc_required); | 2768 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2774 } else { | 2769 } else { |
| 2775 GenerateHeapResultAllocation( | 2770 BinaryOpStub_GenerateHeapResultAllocation( |
| 2776 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2771 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
| 2772 mode); |
| 2777 } | 2773 } |
| 2778 | 2774 |
| 2779 // a2: Answer as signed int32. | 2775 // a2: Answer as signed int32. |
| 2780 // t1: Heap number to write answer into. | 2776 // t1: Heap number to write answer into. |
| 2781 | 2777 |
| 2782 // Nothing can go wrong now, so move the heap number to v0, which is the | 2778 // Nothing can go wrong now, so move the heap number to v0, which is the |
| 2783 // result. | 2779 // result. |
| 2784 __ mov(v0, t1); | 2780 __ mov(v0, t1); |
| 2785 | 2781 |
| 2786 if (CpuFeatures::IsSupported(FPU)) { | 2782 if (CpuFeatures::IsSupported(FPU)) { |
| 2787 // Convert the int32 in a2 to the heap number in v0. As | 2783 // Convert the int32 in a2 to the heap number in v0. As |
| 2788 // mentioned above SHR needs to always produce a positive result. | 2784 // mentioned above SHR needs to always produce a positive result. |
| 2789 CpuFeatures::Scope scope(FPU); | 2785 CpuFeatures::Scope scope(FPU); |
| 2790 __ mtc1(a2, f0); | 2786 __ mtc1(a2, f0); |
| 2791 if (op_ == Token::SHR) { | 2787 if (op == Token::SHR) { |
| 2792 __ Cvt_d_uw(f0, f0, f22); | 2788 __ Cvt_d_uw(f0, f0, f22); |
| 2793 } else { | 2789 } else { |
| 2794 __ cvt_d_w(f0, f0); | 2790 __ cvt_d_w(f0, f0); |
| 2795 } | 2791 } |
| 2796 // ARM uses a workaround here because of the unaligned HeapNumber | 2792 // ARM uses a workaround here because of the unaligned HeapNumber |
| 2797 // kValueOffset. On MIPS this workaround is built into sdc1 so | 2793 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 2798 // there's no point in generating even more instructions. | 2794 // there's no point in generating even more instructions. |
| 2799 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 2795 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2800 __ Ret(); | 2796 __ Ret(); |
| 2801 } else { | 2797 } else { |
| 2802 // Tail call that writes the int32 in a2 to the heap number in v0, using | 2798 // Tail call that writes the int32 in a2 to the heap number in v0, using |
| 2803 // a3 and a0 as scratch. v0 is preserved and returned. | 2799 // a3 and a0 as scratch. v0 is preserved and returned. |
| 2804 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); | 2800 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); |
| 2805 __ TailCallStub(&stub); | 2801 __ TailCallStub(&stub); |
| 2806 } | 2802 } |
| 2807 break; | 2803 break; |
| 2808 } | 2804 } |
| 2809 default: | 2805 default: |
| 2810 UNREACHABLE(); | 2806 UNREACHABLE(); |
| 2811 } | 2807 } |
| 2812 } | 2808 } |
| 2813 | 2809 |
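The Cvt_d_uw / cvt_d_w split above exists because JavaScript's >>> is the one bitwise operator whose 32-bit result must be read as unsigned before conversion to a double; all the others produce signed results. A standalone C++ sketch (not the assembler API):

```cpp
#include <cstdint>

// Convert the 32-bit integer result of a bitwise op to a JS number.
// SHR (>>>) produces an unsigned result; every other bitwise op is signed.
double Int32ResultToNumber(int32_t bits, bool op_is_shr) {
  return op_is_shr ? static_cast<double>(static_cast<uint32_t>(bits))
                   : static_cast<double>(bits);
}
```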
| 2814 | 2810 |
| 2815 // Generate the smi code. If the operation on smis is successful this return is | 2811 // Generate the smi code. If the operation on smis is successful this return is |
| 2816 // generated. If the result is not a smi and heap number allocation is not | 2812 // generated. If the result is not a smi and heap number allocation is not |
| 2817 // requested the code falls through. If number allocation is requested but a | 2813 // requested the code falls through. If number allocation is requested but a |
| 2818 // heap number cannot be allocated the code jumps to the lable gc_required. | 2814 // heap number cannot be allocated the code jumps to the label gc_required. |
| 2819 void BinaryOpStub::GenerateSmiCode( | 2815 void BinaryOpStub_GenerateSmiCode( |
| 2820 MacroAssembler* masm, | 2816 MacroAssembler* masm, |
| 2821 Label* use_runtime, | 2817 Label* use_runtime, |
| 2822 Label* gc_required, | 2818 Label* gc_required, |
| 2823 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2819 Token::Value op, |
| 2820 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| 2821 OverwriteMode mode) { |
| 2824 Label not_smis; | 2822 Label not_smis; |
| 2825 | 2823 |
| 2826 Register left = a1; | 2824 Register left = a1; |
| 2827 Register right = a0; | 2825 Register right = a0; |
| 2828 Register scratch1 = t3; | 2826 Register scratch1 = t3; |
| 2829 | 2827 |
| 2830 // Perform combined smi check on both operands. | 2828 // Perform combined smi check on both operands. |
| 2831 __ Or(scratch1, left, Operand(right)); | 2829 __ Or(scratch1, left, Operand(right)); |
| 2832 STATIC_ASSERT(kSmiTag == 0); | 2830 STATIC_ASSERT(kSmiTag == 0); |
| 2833 __ JumpIfNotSmi(scratch1, ¬_smis); | 2831 __ JumpIfNotSmi(scratch1, ¬_smis); |
| 2834 | 2832 |
| 2835 // If the smi-smi operation results in a smi, the return is generated. | 2833 // If the smi-smi operation results in a smi, the return is generated. |
| 2836 GenerateSmiSmiOperation(masm); | 2834 BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| 2837 | 2835 |
| 2838 // If heap number results are possible generate the result in an allocated | 2836 // If heap number results are possible generate the result in an allocated |
| 2839 // heap number. | 2837 // heap number. |
| 2840 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2838 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| 2841 GenerateFPOperation(masm, true, use_runtime, gc_required); | 2839 BinaryOpStub_GenerateFPOperation( |
| 2840 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
| 2841 use_runtime, gc_required, ¬_smis, op, mode); |
| 2842 } | 2842 } |
| 2843 __ bind(¬_smis); | 2843 __ bind(¬_smis); |
| 2844 } | 2844 } |
| 2845 | 2845 |
| 2846 | 2846 |
| 2847 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2847 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 2848 Label not_smis, call_runtime; | 2848 Label not_smis, call_runtime; |
| 2849 | 2849 |
| 2850 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 2850 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
| 2851 result_type_ == BinaryOpIC::SMI) { | 2851 result_type_ == BinaryOpIC::SMI) { |
| 2852 // Only allow smi results. | 2852 // Only allow smi results. |
| 2853 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); | 2853 BinaryOpStub_GenerateSmiCode( |
| 2854 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); |
| 2854 } else { | 2855 } else { |
| 2855 // Allow heap number result and don't make a transition if a heap number | 2856 // Allow heap number result and don't make a transition if a heap number |
| 2856 // cannot be allocated. | 2857 // cannot be allocated. |
| 2857 GenerateSmiCode(masm, | 2858 BinaryOpStub_GenerateSmiCode( |
| 2858 &call_runtime, | 2859 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, |
| 2859 &call_runtime, | 2860 mode_); |
| 2860 ALLOW_HEAPNUMBER_RESULTS); | |
| 2861 } | 2861 } |
| 2862 | 2862 |
| 2863 // Code falls through if the result is not returned as either a smi or heap | 2863 // Code falls through if the result is not returned as either a smi or heap |
| 2864 // number. | 2864 // number. |
| 2865 GenerateTypeTransition(masm); | 2865 GenerateTypeTransition(masm); |
| 2866 | 2866 |
| 2867 __ bind(&call_runtime); | 2867 __ bind(&call_runtime); |
| 2868 GenerateRegisterArgsPush(masm); |
| 2868 GenerateCallRuntime(masm); | 2869 GenerateCallRuntime(masm); |
| 2869 } | 2870 } |
| 2870 | 2871 |
| 2871 | 2872 |
| 2872 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | |
| 2873 ASSERT(operands_type_ == BinaryOpIC::STRING); | |
| 2874 // Try to add arguments as strings, otherwise, transition to the generic | |
| 2875 // BinaryOpIC type. | |
| 2876 GenerateAddStrings(masm); | |
| 2877 GenerateTypeTransition(masm); | |
| 2878 } | |
| 2879 | |
| 2880 | |
| 2881 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 2873 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| 2882 Label call_runtime; | 2874 Label call_runtime; |
| 2883 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); | 2875 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
| 2884 ASSERT(op_ == Token::ADD); | 2876 ASSERT(op_ == Token::ADD); |
| 2885 // If both arguments are strings, call the string add stub. | 2877 // If both arguments are strings, call the string add stub. |
| 2886 // Otherwise, do a transition. | 2878 // Otherwise, do a transition. |
| 2887 | 2879 |
| 2888 // Registers containing left and right operands respectively. | 2880 // Registers containing left and right operands respectively. |
| 2889 Register left = a1; | 2881 Register left = a1; |
| 2890 Register right = a0; | 2882 Register right = a0; |
| 2891 | 2883 |
| 2892 // Test if left operand is a string. | 2884 // Test if left operand is a string. |
| 2893 __ JumpIfSmi(left, &call_runtime); | 2885 __ JumpIfSmi(left, &call_runtime); |
| 2894 __ GetObjectType(left, a2, a2); | 2886 __ GetObjectType(left, a2, a2); |
| 2895 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | 2887 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 2896 | 2888 |
| 2897 // Test if right operand is a string. | 2889 // Test if right operand is a string. |
| 2898 __ JumpIfSmi(right, &call_runtime); | 2890 __ JumpIfSmi(right, &call_runtime); |
| 2899 __ GetObjectType(right, a2, a2); | 2891 __ GetObjectType(right, a2, a2); |
| 2900 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | 2892 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 2901 | 2893 |
| 2902 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 2894 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 2903 GenerateRegisterArgsPush(masm); | 2895 GenerateRegisterArgsPush(masm); |
| 2904 __ TailCallStub(&string_add_stub); | 2896 __ TailCallStub(&string_add_stub); |
| 2905 | 2897 |
| 2906 __ bind(&call_runtime); | 2898 __ bind(&call_runtime); |
| 2907 GenerateTypeTransition(masm); | 2899 GenerateTypeTransition(masm); |
| 2908 } | 2900 } |
| 2909 | 2901 |
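The two string checks above rely on V8's instance-type numbering, in which every string type sorts below FIRST_NONSTRING_TYPE, so a single compare classifies an object. A small sketch of that invariant; the enum values are made up for illustration, only the ordering matters:

    #include <cassert>

    enum InstanceType {        // illustrative subset, not the real table
      STRING_TYPE = 0,
      ASCII_STRING_TYPE = 4,
      FIRST_NONSTRING_TYPE = 0x80,
      JS_OBJECT_TYPE = 0xA0
    };

    bool IsStringType(InstanceType t) { return t < FIRST_NONSTRING_TYPE; }

    int main() {
      assert(IsStringType(ASCII_STRING_TYPE));
      assert(!IsStringType(JS_OBJECT_TYPE));
      return 0;
    }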
| 2910 | 2902 |
| 2911 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2903 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 2912 ASSERT(operands_type_ == BinaryOpIC::INT32); | 2904 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| 2913 | 2905 |
| 2914 Register left = a1; | 2906 Register left = a1; |
| 2915 Register right = a0; | 2907 Register right = a0; |
| 2916 Register scratch1 = t3; | 2908 Register scratch1 = t3; |
| 2917 Register scratch2 = t5; | 2909 Register scratch2 = t5; |
| 2918 FPURegister double_scratch = f0; | 2910 FPURegister double_scratch = f0; |
| 2919 FPURegister single_scratch = f6; | 2911 FPURegister single_scratch = f6; |
| 2920 | 2912 |
| 2921 Register heap_number_result = no_reg; | 2913 Register heap_number_result = no_reg; |
| 2922 Register heap_number_map = t2; | 2914 Register heap_number_map = t2; |
| 2923 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2915 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2924 | 2916 |
| 2925 Label call_runtime; | 2917 Label call_runtime; |
| 2926 // Labels for type transition, used for wrong input or output types. | 2918 // Labels for type transition, used for wrong input or output types. |
| 2927 // Both labels are currently bound to the same position. We use two | 2919 // Both labels are currently bound to the same position. We use two |
| 2928 // different labels to distinguish the cause of the type transition. | 2920 // different labels to distinguish the cause of the type transition. |
| 2929 Label transition; | 2921 Label transition; |
| 2930 | 2922 |
| 2931 // Smi-smi fast case. | 2923 // Smi-smi fast case. |
| 2932 Label skip; | 2924 Label skip; |
| 2933 __ Or(scratch1, left, right); | 2925 __ Or(scratch1, left, right); |
| 2934 __ JumpIfNotSmi(scratch1, &skip); | 2926 __ JumpIfNotSmi(scratch1, &skip); |
| 2935 GenerateSmiSmiOperation(masm); | 2927 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); |
| 2936 // Fall through if the result is not a smi. | 2928 // Fall through if the result is not a smi. |
| 2937 __ bind(&skip); | 2929 __ bind(&skip); |
| 2938 | 2930 |
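The Or/JumpIfNotSmi pair above tests both operands for smi-ness with one branch: a smi has tag bit 0 clear and a heap pointer has it set, so the OR of the two words has a set low bit iff at least one operand is a heap object. A sketch, assuming 32-bit V8's 1-bit smi tag:

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

    bool BothSmi(uint32_t left, uint32_t right) {
      return ((left | right) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_a = 7u << 1;        // smi 7
      uint32_t smi_b = 3u << 1;        // smi 3
      uint32_t heap_ptr = 0x1000 | 1;  // tagged heap pointer
      assert(BothSmi(smi_a, smi_b));
      assert(!BothSmi(smi_a, heap_ptr));
      return 0;
    }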
| 2939 switch (op_) { | 2931 switch (op_) { |
| 2940 case Token::ADD: | 2932 case Token::ADD: |
| 2941 case Token::SUB: | 2933 case Token::SUB: |
| 2942 case Token::MUL: | 2934 case Token::MUL: |
| 2943 case Token::DIV: | 2935 case Token::DIV: |
| 2944 case Token::MOD: { | 2936 case Token::MOD: { |
| 2937 // It could be that only SMIs have been seen for either the left |
| 2938 // or the right operand. For precise type feedback, patch the IC |
| 2939 // again if this changes. |
| 2940 if (left_type_ == BinaryOpIC::SMI) { |
| 2941 __ JumpIfNotSmi(left, &transition); |
| 2942 } |
| 2943 if (right_type_ == BinaryOpIC::SMI) { |
| 2944 __ JumpIfNotSmi(right, &transition); |
| 2945 } |
| 2945 // Load both operands and check that they are 32-bit integers. | 2946 // Load both operands and check that they are 32-bit integers. |
| 2946 // Jump to type transition if they are not. The registers a0 and a1 (right | 2947 // Jump to type transition if they are not. The registers a0 and a1 (right |
| 2947 // and left) are preserved for the runtime call. | 2948 // and left) are preserved for the runtime call. |
| 2948 FloatingPointHelper::Destination destination = | 2949 FloatingPointHelper::Destination destination = |
| 2949 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD) | 2950 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD) |
| 2950 ? FloatingPointHelper::kFPURegisters | 2951 ? FloatingPointHelper::kFPURegisters |
| 2951 : FloatingPointHelper::kCoreRegisters; | 2952 : FloatingPointHelper::kCoreRegisters; |
| 2952 | 2953 |
| 2953 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2954 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 2954 right, | 2955 right, |
| (...skipping 76 matching lines...) |
| 3031 // DIV just falls through to allocating a heap number. | 3032 // DIV just falls through to allocating a heap number. |
| 3032 } | 3033 } |
| 3033 | 3034 |
| 3034 __ bind(&return_heap_number); | 3035 __ bind(&return_heap_number); |
| 3035 // Return a heap number, or fall through to type transition or runtime | 3036 // Return a heap number, or fall through to type transition or runtime |
| 3036 // call if we can't. | 3037 // call if we can't. |
| 3037 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER | 3038 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER |
| 3038 : BinaryOpIC::INT32)) { | 3039 : BinaryOpIC::INT32)) { |
| 3039 // We are using FPU registers so s0 is available. | 3040 // We are using FPU registers so s0 is available. |
| 3040 heap_number_result = s0; | 3041 heap_number_result = s0; |
| 3041 GenerateHeapResultAllocation(masm, | 3042 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 3042 heap_number_result, | 3043 heap_number_result, |
| 3043 heap_number_map, | 3044 heap_number_map, |
| 3044 scratch1, | 3045 scratch1, |
| 3045 scratch2, | 3046 scratch2, |
| 3046 &call_runtime); | 3047 &call_runtime, |
| 3048 mode_); |
| 3047 __ mov(v0, heap_number_result); | 3049 __ mov(v0, heap_number_result); |
| 3048 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 3050 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 3049 __ Ret(); | 3051 __ Ret(); |
| 3050 } | 3052 } |
| 3051 | 3053 |
| 3052 // A DIV operation expecting an integer result falls through | 3054 // A DIV operation expecting an integer result falls through |
| 3053 // to type transition. | 3055 // to type transition. |
| 3054 | 3056 |
| 3055 } else { | 3057 } else { |
| 3056 // We preserved a0 and a1 to be able to call runtime. | 3058 // We preserved a0 and a1 to be able to call runtime. |
| 3057 // Save the left value on the stack. | 3059 // Save the left value on the stack. |
| 3058 __ Push(t1, t0); | 3060 __ Push(t1, t0); |
| 3059 | 3061 |
| 3060 Label pop_and_call_runtime; | 3062 Label pop_and_call_runtime; |
| 3061 | 3063 |
| 3062 // Allocate a heap number to store the result. | 3064 // Allocate a heap number to store the result. |
| 3063 heap_number_result = s0; | 3065 heap_number_result = s0; |
| 3064 GenerateHeapResultAllocation(masm, | 3066 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 3065 heap_number_result, | 3067 heap_number_result, |
| 3066 heap_number_map, | 3068 heap_number_map, |
| 3067 scratch1, | 3069 scratch1, |
| 3068 scratch2, | 3070 scratch2, |
| 3069 &pop_and_call_runtime); | 3071 &pop_and_call_runtime, |
| 3072 mode_); |
| 3070 | 3073 |
| 3071 // Load the left value from the value saved on the stack. | 3074 // Load the left value from the value saved on the stack. |
| 3072 __ Pop(a1, a0); | 3075 __ Pop(a1, a0); |
| 3073 | 3076 |
| 3074 // Call the C function to handle the double operation. | 3077 // Call the C function to handle the double operation. |
| 3075 FloatingPointHelper::CallCCodeForDoubleOperation( | 3078 FloatingPointHelper::CallCCodeForDoubleOperation( |
| 3076 masm, op_, heap_number_result, scratch1); | 3079 masm, op_, heap_number_result, scratch1); |
| 3077 if (FLAG_debug_code) { | 3080 if (FLAG_debug_code) { |
| 3078 __ stop("Unreachable code."); | 3081 __ stop("Unreachable code."); |
| 3079 } | 3082 } |
| (...skipping 88 matching lines...) |
| 3168 // Check if the result fits in a smi. | 3171 // Check if the result fits in a smi. |
| 3169 __ Addu(scratch1, a2, Operand(0x40000000)); | 3172 __ Addu(scratch1, a2, Operand(0x40000000)); |
| 3170 // If not, try to return a heap number. (We know the result is an int32.) | 3173 // If not, try to return a heap number. (We know the result is an int32.) |
| 3171 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); | 3174 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); |
| 3172 // Tag the result and return. | 3175 // Tag the result and return. |
| 3173 __ SmiTag(v0, a2); | 3176 __ SmiTag(v0, a2); |
| 3174 __ Ret(); | 3177 __ Ret(); |
| 3175 | 3178 |
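The Addu/branch pair above is the standard fits-in-smi test: a 32-bit x fits in a 31-bit smi payload iff -2^30 <= x < 2^30, which holds exactly when x + 2^30 has a clear sign bit. A sketch of the check (two's-complement wraparound assumed, as on MIPS):

    #include <cassert>
    #include <cstdint>

    bool FitsInSmi(int32_t x) {
      // Mirrors: Addu(scratch1, x, 0x40000000); branch if scratch1 < 0.
      int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(x) +
                                         0x40000000u);
      return sum >= 0;
    }

    int main() {
      assert(FitsInSmi(0) && FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
      assert(!FitsInSmi(1 << 30) && !FitsInSmi(INT32_MIN));
      return 0;
    }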
| 3176 __ bind(&return_heap_number); | 3179 __ bind(&return_heap_number); |
| 3177 heap_number_result = t1; | 3180 heap_number_result = t1; |
| 3178 GenerateHeapResultAllocation(masm, | 3181 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 3179 heap_number_result, | 3182 heap_number_result, |
| 3180 heap_number_map, | 3183 heap_number_map, |
| 3181 scratch1, | 3184 scratch1, |
| 3182 scratch2, | 3185 scratch2, |
| 3183 &call_runtime); | 3186 &call_runtime, |
| 3187 mode_); |
| 3184 | 3188 |
| 3185 if (CpuFeatures::IsSupported(FPU)) { | 3189 if (CpuFeatures::IsSupported(FPU)) { |
| 3186 CpuFeatures::Scope scope(FPU); | 3190 CpuFeatures::Scope scope(FPU); |
| 3187 | 3191 |
| 3188 if (op_ != Token::SHR) { | 3192 if (op_ != Token::SHR) { |
| 3189 // Convert the result to a floating point value. | 3193 // Convert the result to a floating point value. |
| 3190 __ mtc1(a2, double_scratch); | 3194 __ mtc1(a2, double_scratch); |
| 3191 __ cvt_d_w(double_scratch, double_scratch); | 3195 __ cvt_d_w(double_scratch, double_scratch); |
| 3192 } else { | 3196 } else { |
| 3193 // The result must be interpreted as an unsigned 32-bit integer. | 3197 // The result must be interpreted as an unsigned 32-bit integer. |
| (...skipping 23 matching lines...) |
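The SHR special case above exists because JavaScript's >>> produces an unsigned 32-bit result, which can exceed the signed int32 (and smi) range and therefore needs an unsigned-to-double conversion path. A sketch of the semantics:

    #include <cassert>
    #include <cstdint>

    double ShrResult(int32_t value, uint32_t shift) {
      uint32_t u = static_cast<uint32_t>(value) >> (shift & 31);
      return static_cast<double>(u);  // convert from *unsigned* 32-bit
    }

    int main() {
      assert(ShrResult(-1, 0) == 4294967295.0);  // (-1 >>> 0) in JS
      return 0;
    }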
| 3217 // We never expect DIV to yield an integer result, so we always generate | 3221 // We never expect DIV to yield an integer result, so we always generate |
| 3218 // type transition code for DIV operations expecting an integer result: the | 3222 // type transition code for DIV operations expecting an integer result: the |
| 3219 // code will fall through to this type transition. | 3223 // code will fall through to this type transition. |
| 3220 if (transition.is_linked() || | 3224 if (transition.is_linked() || |
| 3221 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { | 3225 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { |
| 3222 __ bind(&transition); | 3226 __ bind(&transition); |
| 3223 GenerateTypeTransition(masm); | 3227 GenerateTypeTransition(masm); |
| 3224 } | 3228 } |
| 3225 | 3229 |
| 3226 __ bind(&call_runtime); | 3230 __ bind(&call_runtime); |
| 3231 GenerateRegisterArgsPush(masm); |
| 3227 GenerateCallRuntime(masm); | 3232 GenerateCallRuntime(masm); |
| 3228 } | 3233 } |
| 3229 | 3234 |
| 3230 | 3235 |
| 3231 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { | 3236 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
| 3232 Label call_runtime; | 3237 Label call_runtime; |
| 3233 | 3238 |
| 3234 if (op_ == Token::ADD) { | 3239 if (op_ == Token::ADD) { |
| 3235 // Handle string addition here, because it is the only operation | 3240 // Handle string addition here, because it is the only operation |
| 3236 // that does not do a ToNumber conversion on the operands. | 3241 // that does not do a ToNumber conversion on the operands. |
| (...skipping 18 matching lines...) |
| 3255 } else { | 3260 } else { |
| 3256 __ LoadRoot(a0, Heap::kNanValueRootIndex); | 3261 __ LoadRoot(a0, Heap::kNanValueRootIndex); |
| 3257 } | 3262 } |
| 3258 __ bind(&done); | 3263 __ bind(&done); |
| 3259 | 3264 |
| 3260 GenerateHeapNumberStub(masm); | 3265 GenerateHeapNumberStub(masm); |
| 3261 } | 3266 } |
| 3262 | 3267 |
| 3263 | 3268 |
| 3264 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 3269 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 3265 Label call_runtime; | 3270 Label call_runtime, transition; |
| 3266 GenerateFPOperation(masm, false, &call_runtime, &call_runtime); | 3271 BinaryOpStub_GenerateFPOperation( |
| 3272 masm, left_type_, right_type_, false, |
| 3273 &transition, &call_runtime, &transition, op_, mode_); |
| 3274 |
| 3275 __ bind(&transition); |
| 3276 GenerateTypeTransition(masm); |
| 3267 | 3277 |
| 3268 __ bind(&call_runtime); | 3278 __ bind(&call_runtime); |
| 3279 GenerateRegisterArgsPush(masm); |
| 3269 GenerateCallRuntime(masm); | 3280 GenerateCallRuntime(masm); |
| 3270 } | 3281 } |
| 3271 | 3282 |
| 3272 | 3283 |
| 3273 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 3284 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 3274 Label call_runtime, call_string_add_or_runtime; | 3285 Label call_runtime, call_string_add_or_runtime, transition; |
| 3275 | 3286 |
| 3276 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 3287 BinaryOpStub_GenerateSmiCode( |
| 3288 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); |
| 3277 | 3289 |
| 3278 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); | 3290 BinaryOpStub_GenerateFPOperation( |
| 3291 masm, left_type_, right_type_, false, |
| 3292 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); |
| 3293 |
| 3294 __ bind(&transition); |
| 3295 GenerateTypeTransition(masm); |
| 3279 | 3296 |
| 3280 __ bind(&call_string_add_or_runtime); | 3297 __ bind(&call_string_add_or_runtime); |
| 3281 if (op_ == Token::ADD) { | 3298 if (op_ == Token::ADD) { |
| 3282 GenerateAddStrings(masm); | 3299 GenerateAddStrings(masm); |
| 3283 } | 3300 } |
| 3284 | 3301 |
| 3285 __ bind(&call_runtime); | 3302 __ bind(&call_runtime); |
| 3303 GenerateRegisterArgsPush(masm); |
| 3286 GenerateCallRuntime(masm); | 3304 GenerateCallRuntime(masm); |
| 3287 } | 3305 } |
| 3288 | 3306 |
| 3289 | 3307 |
| 3290 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 3308 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
| 3291 ASSERT(op_ == Token::ADD); | 3309 ASSERT(op_ == Token::ADD); |
| 3292 Label left_not_string, call_runtime; | 3310 Label left_not_string, call_runtime; |
| 3293 | 3311 |
| 3294 Register left = a1; | 3312 Register left = a1; |
| 3295 Register right = a0; | 3313 Register right = a0; |
| (...skipping 15 matching lines...) |
| 3311 | 3329 |
| 3312 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); | 3330 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); |
| 3313 GenerateRegisterArgsPush(masm); | 3331 GenerateRegisterArgsPush(masm); |
| 3314 __ TailCallStub(&string_add_right_stub); | 3332 __ TailCallStub(&string_add_right_stub); |
| 3315 | 3333 |
| 3316 // At least one argument is not a string. | 3334 // At least one argument is not a string. |
| 3317 __ bind(&call_runtime); | 3335 __ bind(&call_runtime); |
| 3318 } | 3336 } |
| 3319 | 3337 |
| 3320 | 3338 |
| 3321 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { | 3339 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 3322 GenerateRegisterArgsPush(masm); | 3340 Register result, |
| 3323 switch (op_) { | 3341 Register heap_number_map, |
| 3324 case Token::ADD: | 3342 Register scratch1, |
| 3325 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | 3343 Register scratch2, |
| 3326 break; | 3344 Label* gc_required, |
| 3327 case Token::SUB: | 3345 OverwriteMode mode) { |
| 3328 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | |
| 3329 break; | |
| 3330 case Token::MUL: | |
| 3331 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | |
| 3332 break; | |
| 3333 case Token::DIV: | |
| 3334 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); | |
| 3335 break; | |
| 3336 case Token::MOD: | |
| 3337 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | |
| 3338 break; | |
| 3339 case Token::BIT_OR: | |
| 3340 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); | |
| 3341 break; | |
| 3342 case Token::BIT_AND: | |
| 3343 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); | |
| 3344 break; | |
| 3345 case Token::BIT_XOR: | |
| 3346 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); | |
| 3347 break; | |
| 3348 case Token::SAR: | |
| 3349 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); | |
| 3350 break; | |
| 3351 case Token::SHR: | |
| 3352 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | |
| 3353 break; | |
| 3354 case Token::SHL: | |
| 3355 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); | |
| 3356 break; | |
| 3357 default: | |
| 3358 UNREACHABLE(); | |
| 3359 } | |
| 3360 } | |
| 3361 | |
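With GenerateCallRuntime's per-token InvokeBuiltin dispatch deleted above, the GenerateRegisterArgsPush call it used to make is hoisted to every call_runtime site (new lines 2868, 3231, 3279, and 3303), so the shared fallback can assume its arguments are already on the stack. The removed dispatch was a straight one-to-one token-to-builtin mapping; a sketch of its shape, with illustrative names:

    #include <cassert>

    enum Token { ADD, SUB, MUL, DIV, MOD };
    enum Builtin { B_ADD, B_SUB, B_MUL, B_DIV, B_MOD };

    // Each token maps to the JS builtin that performs the operation
    // with full ToNumber/ToPrimitive semantics.
    Builtin BuiltinFor(Token op) {
      switch (op) {
        case ADD: return B_ADD;
        case SUB: return B_SUB;
        case MUL: return B_MUL;
        case DIV: return B_DIV;
        case MOD: return B_MOD;
      }
      assert(false && "unreachable");
      return B_ADD;
    }

    int main() { return BuiltinFor(MUL) == B_MUL ? 0 : 1; }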
| 3362 | |
| 3363 void BinaryOpStub::GenerateHeapResultAllocation( | |
| 3364 MacroAssembler* masm, | |
| 3365 Register result, | |
| 3366 Register heap_number_map, | |
| 3367 Register scratch1, | |
| 3368 Register scratch2, | |
| 3369 Label* gc_required) { | |
| 3370 | |
| 3371 // The code below will clobber result if allocation fails. To keep both | 3346 // The code below will clobber result if allocation fails. To keep both |
| 3372 // arguments intact for the runtime call, result cannot be one of them. | 3347 // arguments intact for the runtime call, result cannot be one of them. |
| 3373 ASSERT(!result.is(a0) && !result.is(a1)); | 3348 ASSERT(!result.is(a0) && !result.is(a1)); |
| 3374 | 3349 |
| 3375 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { | 3350 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { |
| 3376 Label skip_allocation, allocated; | 3351 Label skip_allocation, allocated; |
| 3377 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0; | 3352 Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0; |
| 3378 // If the overwritable operand is already an object, we skip the | 3353 // If the overwritable operand is already an object, we skip the |
| 3379 // allocation of a heap number. | 3354 // allocation of a heap number. |
| 3380 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | 3355 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| 3381 // Allocate a heap number for the result. | 3356 // Allocate a heap number for the result. |
| 3382 __ AllocateHeapNumber( | 3357 __ AllocateHeapNumber( |
| 3383 result, scratch1, scratch2, heap_number_map, gc_required); | 3358 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3384 __ Branch(&allocated); | 3359 __ Branch(&allocated); |
| 3385 __ bind(&skip_allocation); | 3360 __ bind(&skip_allocation); |
| 3386 // Use object holding the overwritable operand for result. | 3361 // Use object holding the overwritable operand for result. |
| 3387 __ mov(result, overwritable_operand); | 3362 __ mov(result, overwritable_operand); |
| 3388 __ bind(&allocated); | 3363 __ bind(&allocated); |
| 3389 } else { | 3364 } else { |
| 3390 ASSERT(mode_ == NO_OVERWRITE); | 3365 ASSERT(mode == NO_OVERWRITE); |
| 3391 __ AllocateHeapNumber( | 3366 __ AllocateHeapNumber( |
| 3392 result, scratch1, scratch2, heap_number_map, gc_required); | 3367 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3393 } | 3368 } |
| 3394 } | 3369 } |
| 3395 | 3370 |
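BinaryOpStub_GenerateHeapResultAllocation above keeps the old member's logic with mode now a parameter: under an overwrite mode, a heap-number operand is reused as the result object and allocation is skipped, while a smi operand (immutable by construction) still forces a fresh allocation. A C-level model of that decision, with the 1-bit smi tag as a stated assumption:

    #include <cassert>
    #include <cstdint>

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    bool IsSmi(uintptr_t v) { return (v & 1) == 0; }  // assumed 1-bit tag

    // allocate() models AllocateHeapNumber; in the assembly version a
    // failed allocation takes the caller's gc_required path instead.
    uintptr_t ResultSlot(OverwriteMode mode, uintptr_t left, uintptr_t right,
                         uintptr_t (*allocate)()) {
      if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
        uintptr_t overwritable = (mode == OVERWRITE_LEFT) ? left : right;
        if (!IsSmi(overwritable)) return overwritable;  // skip_allocation
      }
      return allocate();  // NO_OVERWRITE, or the operand was a smi
    }

    int main() {
      auto alloc = []() -> uintptr_t { return 0x2000 | 1; };
      assert(ResultSlot(OVERWRITE_LEFT, 0x1000 | 1, 2u << 1, alloc) ==
             (0x1000 | 1));                      // reuse left heap number
      assert(ResultSlot(NO_OVERWRITE, 0x1000 | 1, 2u << 1, alloc) ==
             (0x2000 | 1));                      // always allocate fresh
      return 0;
    }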
| 3396 | 3371 |
| 3397 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 3372 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 3398 __ Push(a1, a0); | 3373 __ Push(a1, a0); |
| 3399 } | 3374 } |
| 3400 | 3375 |
| (...skipping 2191 matching lines...) |
| 5592 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | 5567 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); |
| 5593 __ bind(&do_call); | 5568 __ bind(&do_call); |
| 5594 // Set expected number of arguments to zero (not changing a0). | 5569 // Set expected number of arguments to zero (not changing a0). |
| 5595 __ li(a2, Operand(0, RelocInfo::NONE)); | 5570 __ li(a2, Operand(0, RelocInfo::NONE)); |
| 5596 __ SetCallKind(t1, CALL_AS_METHOD); | 5571 __ SetCallKind(t1, CALL_AS_METHOD); |
| 5597 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 5572 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 5598 RelocInfo::CODE_TARGET); | 5573 RelocInfo::CODE_TARGET); |
| 5599 } | 5574 } |
| 5600 | 5575 |
| 5601 | 5576 |
| 5602 // Unfortunately you have to run without snapshots to see most of these | |
| 5603 // names in the profile since most compare stubs end up in the snapshot. | |
| 5604 void CompareStub::PrintName(StringStream* stream) { | |
| 5605 ASSERT((lhs_.is(a0) && rhs_.is(a1)) || | |
| 5606 (lhs_.is(a1) && rhs_.is(a0))); | |
| 5607 const char* cc_name; | |
| 5608 switch (cc_) { | |
| 5609 case lt: cc_name = "LT"; break; | |
| 5610 case gt: cc_name = "GT"; break; | |
| 5611 case le: cc_name = "LE"; break; | |
| 5612 case ge: cc_name = "GE"; break; | |
| 5613 case eq: cc_name = "EQ"; break; | |
| 5614 case ne: cc_name = "NE"; break; | |
| 5615 default: cc_name = "UnknownCondition"; break; | |
| 5616 } | |
| 5617 bool is_equality = cc_ == eq || cc_ == ne; | |
| 5618 stream->Add("CompareStub_%s", cc_name); | |
| 5619 stream->Add(lhs_.is(a0) ? "_a0" : "_a1"); | |
| 5620 stream->Add(rhs_.is(a0) ? "_a0" : "_a1"); | |
| 5621 if (strict_ && is_equality) stream->Add("_STRICT"); | |
| 5622 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); | |
| 5623 if (!include_number_compare_) stream->Add("_NO_NUMBER"); | |
| 5624 if (!include_smi_compare_) stream->Add("_NO_SMI"); | |
| 5625 } | |
| 5626 | |
| 5627 | |
| 5628 int CompareStub::MinorKey() { | |
| 5629 // Encode the two parameters in a unique 16-bit value. | |
| 5630 ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); | |
| 5631 ASSERT((lhs_.is(a0) && rhs_.is(a1)) || | |
| 5632 (lhs_.is(a1) && rhs_.is(a0))); | |
| 5633 return ConditionField::encode(static_cast<unsigned>(cc_)) | |
| 5634 | RegisterField::encode(lhs_.is(a0)) | |
| 5635 | StrictField::encode(strict_) | |
| 5636 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) | |
| 5637 | IncludeSmiCompareField::encode(include_smi_compare_); | |
| 5638 } | |
| 5639 | |
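The deleted MinorKey above packed the stub's parameters into a compact key via BitField-style encoders; with ICCompareStub's typed left_/right_/state_ fields replacing CompareStub, this encoding goes away. A sketch of the packing idiom (field positions and widths here are illustrative assumptions, not V8's):

    #include <cassert>
    #include <cstdint>

    template <int shift, int bits>
    struct BitField {
      static uint32_t encode(uint32_t v) {
        assert(v < (1u << bits));  // value must fit its field
        return v << shift;
      }
    };

    using ConditionField = BitField<0, 14>;
    using RegisterField  = BitField<14, 1>;
    using StrictField    = BitField<15, 1>;

    int main() {
      uint32_t key = ConditionField::encode(5) |
                     RegisterField::encode(1) |
                     StrictField::encode(0);
      assert(key == (5u | (1u << 14)));
      return 0;
    }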
| 5640 | |
| 5641 // StringCharCodeAtGenerator. | 5577 // StringCharCodeAtGenerator. |
| 5642 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 5578 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 5643 Label flat_string; | 5579 Label flat_string; |
| 5644 Label ascii_string; | 5580 Label ascii_string; |
| 5645 Label got_char_code; | 5581 Label got_char_code; |
| 5646 Label sliced_string; | 5582 Label sliced_string; |
| 5647 | 5583 |
| 5648 ASSERT(!t0.is(index_)); | 5584 ASSERT(!t0.is(index_)); |
| 5649 ASSERT(!t0.is(result_)); | 5585 ASSERT(!t0.is(result_)); |
| 5650 ASSERT(!t0.is(object_)); | 5586 ASSERT(!t0.is(object_)); |
| (...skipping 1171 matching lines...) |
| 6822 __ And(scratch2, scratch2, scratch4); | 6758 __ And(scratch2, scratch2, scratch4); |
| 6823 __ Branch(slow, ne, scratch2, Operand(scratch4)); | 6759 __ Branch(slow, ne, scratch2, Operand(scratch4)); |
| 6824 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset)); | 6760 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
| 6825 __ sw(arg, MemOperand(sp, stack_offset)); | 6761 __ sw(arg, MemOperand(sp, stack_offset)); |
| 6826 | 6762 |
| 6827 __ bind(&done); | 6763 __ bind(&done); |
| 6828 } | 6764 } |
| 6829 | 6765 |
| 6830 | 6766 |
| 6831 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 6767 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 6832 ASSERT(state_ == CompareIC::SMIS); | 6768 ASSERT(state_ == CompareIC::SMI); |
| 6833 Label miss; | 6769 Label miss; |
| 6834 __ Or(a2, a1, a0); | 6770 __ Or(a2, a1, a0); |
| 6835 __ JumpIfNotSmi(a2, &miss); | 6771 __ JumpIfNotSmi(a2, &miss); |
| 6836 | 6772 |
| 6837 if (GetCondition() == eq) { | 6773 if (GetCondition() == eq) { |
| 6838 // For equality we do not care about the sign of the result. | 6774 // For equality we do not care about the sign of the result. |
| 6839 __ Subu(v0, a0, a1); | 6775 __ Subu(v0, a0, a1); |
| 6840 } else { | 6776 } else { |
| 6841 // Untag before subtracting to avoid handling overflow. | 6777 // Untag before subtracting to avoid handling overflow. |
| 6842 __ SmiUntag(a1); | 6778 __ SmiUntag(a1); |
| 6843 __ SmiUntag(a0); | 6779 __ SmiUntag(a0); |
| 6844 __ Subu(v0, a1, a0); | 6780 __ Subu(v0, a1, a0); |
| 6845 } | 6781 } |
| 6846 __ Ret(); | 6782 __ Ret(); |
| 6847 | 6783 |
| 6848 __ bind(&miss); | 6784 __ bind(&miss); |
| 6849 GenerateMiss(masm); | 6785 GenerateMiss(masm); |
| 6850 } | 6786 } |
| 6851 | 6787 |
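GenerateSmis above picks between two subtraction strategies: for equality the raw difference suffices (zero iff equal, and overflow cannot make equal values differ), while ordered comparison untags first because the difference of two 31-bit values always fits in 32 bits, so its sign is trustworthy. A sketch, assuming 32-bit V8 smis (value in the upper 31 bits, tag bit 0 == 0):

    #include <cassert>
    #include <cstdint>

    int32_t CompareSmisForEquality(int32_t a_tagged, int32_t b_tagged) {
      // Wrap-around subtraction (what Subu does): zero iff equal;
      // the sign is unreliable on overflow, fine for eq/ne only.
      return static_cast<int32_t>(static_cast<uint32_t>(a_tagged) -
                                  static_cast<uint32_t>(b_tagged));
    }

    int32_t CompareSmisOrdered(int32_t a_tagged, int32_t b_tagged) {
      // Untag (arithmetic shift); the 31-bit difference cannot
      // overflow 32 bits, so the result's sign is meaningful.
      return (a_tagged >> 1) - (b_tagged >> 1);
    }

    int main() {
      int32_t five = 5 << 1, three = 3 << 1;
      assert(CompareSmisForEquality(five, five) == 0);
      assert(CompareSmisOrdered(three, five) < 0);
      return 0;
    }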
| 6852 | 6788 |
| 6853 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 6789 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
| 6854 ASSERT(state_ == CompareIC::HEAP_NUMBERS); | 6790 ASSERT(state_ == CompareIC::HEAP_NUMBER); |
| 6855 | 6791 |
| 6856 Label generic_stub; | 6792 Label generic_stub; |
| 6857 Label unordered, maybe_undefined1, maybe_undefined2; | 6793 Label unordered, maybe_undefined1, maybe_undefined2; |
| 6858 Label miss; | 6794 Label miss; |
| 6859 __ And(a2, a1, Operand(a0)); | |
| 6860 __ JumpIfSmi(a2, &generic_stub); | |
| 6861 | 6795 |
| 6862 __ GetObjectType(a0, a2, a2); | 6796 if (left_ == CompareIC::SMI) { |
| 6863 __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 6797 __ JumpIfNotSmi(a1, &miss); |
| 6864 __ GetObjectType(a1, a2, a2); | 6798 } |
| 6865 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 6799 if (right_ == CompareIC::SMI) { |
| 6800 __ JumpIfNotSmi(a0, &miss); |
| 6801 } |
| 6866 | 6802 |
| 6867 // Inlining the double comparison and falling back to the general compare | 6803 // Inlining the double comparison and falling back to the general compare |
| 6868 // stub if NaN is involved or FPU is unsupported. | 6804 // stub if NaN is involved or FPU is unsupported. |
| 6869 if (CpuFeatures::IsSupported(FPU)) { | 6805 if (CpuFeatures::IsSupported(FPU)) { |
| 6870 CpuFeatures::Scope scope(FPU); | 6806 CpuFeatures::Scope scope(FPU); |
| 6871 | 6807 |
| 6872 // Load left and right operand. | 6808 // Load left and right operand. |
| 6809 Label done, left, left_smi, right_smi; |
| 6810 __ JumpIfSmi(a0, &right_smi); |
| 6811 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 6812 DONT_DO_SMI_CHECK); |
| 6813 __ Subu(a2, a0, Operand(kHeapObjectTag)); |
| 6814 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
| 6815 __ Branch(&left); |
| 6816 __ bind(&right_smi); |
| 6817 __ SmiUntag(a2, a0); // Can't clobber a0 yet. |
| 6818 FPURegister single_scratch = f6; |
| 6819 __ mtc1(a2, single_scratch); |
| 6820 __ cvt_d_w(f2, single_scratch); |
| 6821 |
| 6822 __ bind(&left); |
| 6823 __ JumpIfSmi(a1, &left_smi); |
| 6824 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 6825 DONT_DO_SMI_CHECK); |
| 6873 __ Subu(a2, a1, Operand(kHeapObjectTag)); | 6826 __ Subu(a2, a1, Operand(kHeapObjectTag)); |
| 6874 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); | 6827 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
| 6875 __ Subu(a2, a0, Operand(kHeapObjectTag)); | 6828 __ Branch(&done); |
| 6876 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 6829 __ bind(&left_smi); |
| 6830 __ SmiUntag(a2, a1); // Can't clobber a1 yet. |
| 6831 single_scratch = f8; |
| 6832 __ mtc1(a2, single_scratch); |
| 6833 __ cvt_d_w(f0, single_scratch); |
| 6834 |
| 6835 __ bind(&done); |
| 6877 | 6836 |
| 6878 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 6837 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
| 6879 Label fpu_eq, fpu_lt; | 6838 Label fpu_eq, fpu_lt; |
| 6880 // Test if equal, and also handle the unordered/NaN case. | 6839 // Test if equal, and also handle the unordered/NaN case. |
| 6881 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); | 6840 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
| 6882 | 6841 |
| 6883 // Test if less (unordered case is already handled). | 6842 // Test if less (unordered case is already handled). |
| 6884 __ BranchF(&fpu_lt, NULL, lt, f0, f2); | 6843 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
| 6885 | 6844 |
| 6886 // Otherwise it's greater, so just fall through and return. | 6845 // Otherwise it's greater, so just fall through and return. |
| 6887 __ li(v0, Operand(GREATER)); | 6846 __ li(v0, Operand(GREATER)); |
| 6888 __ Ret(); | 6847 __ Ret(); |
| 6889 | 6848 |
| 6890 __ bind(&fpu_eq); | 6849 __ bind(&fpu_eq); |
| 6891 __ li(v0, Operand(EQUAL)); | 6850 __ li(v0, Operand(EQUAL)); |
| 6892 __ Ret(); | 6851 __ Ret(); |
| 6893 | 6852 |
| 6894 __ bind(&fpu_lt); | 6853 __ bind(&fpu_lt); |
| 6895 __ li(v0, Operand(LESS)); | 6854 __ li(v0, Operand(LESS)); |
| 6896 __ Ret(); | 6855 __ Ret(); |
| 6897 } | 6856 } |
| 6898 | 6857 |
| 6899 __ bind(&unordered); | 6858 __ bind(&unordered); |
| 6900 | |
| 6901 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); | |
| 6902 __ bind(&generic_stub); | 6859 __ bind(&generic_stub); |
| 6860 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| 6861 CompareIC::GENERIC); |
| 6903 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 6862 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 6904 | 6863 |
| 6905 __ bind(&maybe_undefined1); | 6864 __ bind(&maybe_undefined1); |
| 6906 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6865 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 6907 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 6866 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 6908 __ Branch(&miss, ne, a0, Operand(at)); | 6867 __ Branch(&miss, ne, a0, Operand(at)); |
| 6868 __ JumpIfSmi(a1, &unordered); |
| 6909 __ GetObjectType(a1, a2, a2); | 6869 __ GetObjectType(a1, a2, a2); |
| 6910 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 6870 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
| 6911 __ jmp(&unordered); | 6871 __ jmp(&unordered); |
| 6912 } | 6872 } |
| 6913 | 6873 |
| 6914 __ bind(&maybe_undefined2); | 6874 __ bind(&maybe_undefined2); |
| 6915 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6875 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 6916 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 6876 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 6917 __ Branch(&unordered, eq, a1, Operand(at)); | 6877 __ Branch(&unordered, eq, a1, Operand(at)); |
| 6918 } | 6878 } |
| 6919 | 6879 |
| 6920 __ bind(&miss); | 6880 __ bind(&miss); |
| 6921 GenerateMiss(masm); | 6881 GenerateMiss(masm); |
| 6922 } | 6882 } |
| 6923 | 6883 |
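The rewritten GenerateHeapNumbers above now accepts a smi on either side when type feedback allows it, converting in the FPU (mtc1/cvt_d_w) instead of missing to the runtime. A model of the per-operand load, assuming 32-bit V8's heap-object tag of 1 and a double payload in the heap number:

    #include <cassert>
    #include <cstdint>

    struct HeapNumber { double value; };

    double LoadAsDouble(uintptr_t tagged) {
      if ((tagged & 1) == 0) {
        // Smi path: arithmetic untag, then int -> double conversion
        // (the mtc1/cvt_d_w pair in the MIPS code).
        return static_cast<double>(static_cast<intptr_t>(tagged) >> 1);
      }
      // Heap-number path: strip the tag and read the double payload
      // (the Subu(kHeapObjectTag)/ldc1 pair in the MIPS code).
      return reinterpret_cast<HeapNumber*>(tagged - 1)->value;
    }

    int main() {
      HeapNumber hn = { 2.5 };
      uintptr_t smi_seven = 7u << 1;
      uintptr_t heap = reinterpret_cast<uintptr_t>(&hn) + 1;
      assert(LoadAsDouble(smi_seven) == 7.0);
      assert(LoadAsDouble(heap) == 2.5);
      return 0;
    }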
| 6924 | 6884 |
| 6925 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { | 6885 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
| 6926 ASSERT(state_ == CompareIC::SYMBOLS); | 6886 ASSERT(state_ == CompareIC::SYMBOL); |
| 6927 Label miss; | 6887 Label miss; |
| 6928 | 6888 |
| 6929 // Registers containing left and right operands respectively. | 6889 // Registers containing left and right operands respectively. |
| 6930 Register left = a1; | 6890 Register left = a1; |
| 6931 Register right = a0; | 6891 Register right = a0; |
| 6932 Register tmp1 = a2; | 6892 Register tmp1 = a2; |
| 6933 Register tmp2 = a3; | 6893 Register tmp2 = a3; |
| 6934 | 6894 |
| 6935 // Check that both operands are heap objects. | 6895 // Check that both operands are heap objects. |
| 6936 __ JumpIfEitherSmi(left, right, &miss); | 6896 __ JumpIfEitherSmi(left, right, &miss); |
| (...skipping 17 matching lines...) |
| 6954 __ Ret(ne, left, Operand(right)); | 6914 __ Ret(ne, left, Operand(right)); |
| 6955 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 6915 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 6956 __ Ret(); | 6916 __ Ret(); |
| 6957 | 6917 |
| 6958 __ bind(&miss); | 6918 __ bind(&miss); |
| 6959 GenerateMiss(masm); | 6919 GenerateMiss(masm); |
| 6960 } | 6920 } |
| 6961 | 6921 |
| 6962 | 6922 |
| 6963 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 6923 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 6964 ASSERT(state_ == CompareIC::STRINGS); | 6924 ASSERT(state_ == CompareIC::STRING); |
| 6965 Label miss; | 6925 Label miss; |
| 6966 | 6926 |
| 6967 bool equality = Token::IsEqualityOp(op_); | 6927 bool equality = Token::IsEqualityOp(op_); |
| 6968 | 6928 |
| 6969 // Registers containing left and right operands respectively. | 6929 // Registers containing left and right operands respectively. |
| 6970 Register left = a1; | 6930 Register left = a1; |
| 6971 Register right = a0; | 6931 Register right = a0; |
| 6972 Register tmp1 = a2; | 6932 Register tmp1 = a2; |
| 6973 Register tmp2 = a3; | 6933 Register tmp2 = a3; |
| 6974 Register tmp3 = t0; | 6934 Register tmp3 = t0; |
| (...skipping 64 matching lines...) |
| 7039 } else { | 6999 } else { |
| 7040 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 7000 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 7041 } | 7001 } |
| 7042 | 7002 |
| 7043 __ bind(&miss); | 7003 __ bind(&miss); |
| 7044 GenerateMiss(masm); | 7004 GenerateMiss(masm); |
| 7045 } | 7005 } |
| 7046 | 7006 |
| 7047 | 7007 |
| 7048 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 7008 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 7049 ASSERT(state_ == CompareIC::OBJECTS); | 7009 ASSERT(state_ == CompareIC::OBJECT); |
| 7050 Label miss; | 7010 Label miss; |
| 7051 __ And(a2, a1, Operand(a0)); | 7011 __ And(a2, a1, Operand(a0)); |
| 7052 __ JumpIfSmi(a2, &miss); | 7012 __ JumpIfSmi(a2, &miss); |
| 7053 | 7013 |
| 7054 __ GetObjectType(a0, a2, a2); | 7014 __ GetObjectType(a0, a2, a2); |
| 7055 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 7015 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 7056 __ GetObjectType(a1, a2, a2); | 7016 __ GetObjectType(a1, a2, a2); |
| 7057 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 7017 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 7058 | 7018 |
| 7059 ASSERT(GetCondition() == eq); | 7019 ASSERT(GetCondition() == eq); |
| (...skipping 775 matching lines...) |
| 7835 __ Pop(ra, t1, a1); | 7795 __ Pop(ra, t1, a1); |
| 7836 __ Ret(); | 7796 __ Ret(); |
| 7837 } | 7797 } |
| 7838 | 7798 |
| 7839 | 7799 |
| 7840 #undef __ | 7800 #undef __ |
| 7841 | 7801 |
| 7842 } } // namespace v8::internal | 7802 } } // namespace v8::internal |
| 7843 | 7803 |
| 7844 #endif // V8_TARGET_ARCH_MIPS | 7804 #endif // V8_TARGET_ARCH_MIPS |