| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
| 8 | 8 |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 339 matching lines...) |
| 350 | 350 |
| 351 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | 351 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
| 352 // Update the static counter each time a new code stub is generated. | 352 // Update the static counter each time a new code stub is generated. |
| 353 isolate()->counters()->code_stubs()->Increment(); | 353 isolate()->counters()->code_stubs()->Increment(); |
| 354 | 354 |
| 355 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); | 355 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); |
| 356 int param_count = descriptor->GetEnvironmentParameterCount(); | 356 int param_count = descriptor->GetEnvironmentParameterCount(); |
| 357 { | 357 { |
| 358 // Call the runtime system in a fresh internal frame. | 358 // Call the runtime system in a fresh internal frame. |
| 359 FrameScope scope(masm, StackFrame::INTERNAL); | 359 FrameScope scope(masm, StackFrame::INTERNAL); |
| 360 ASSERT((param_count == 0) || | 360 DCHECK((param_count == 0) || |
| 361 a0.is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); | 361 a0.is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); |
| 362 // Push arguments, adjust sp. | 362 // Push arguments, adjust sp. |
| 363 __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); | 363 __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); |
| 364 for (int i = 0; i < param_count; ++i) { | 364 for (int i = 0; i < param_count; ++i) { |
| 365 // Store argument to stack. | 365 // Store argument to stack. |
| 366 __ sd(descriptor->GetEnvironmentParameterRegister(i), | 366 __ sd(descriptor->GetEnvironmentParameterRegister(i), |
| 367 MemOperand(sp, (param_count-1-i) * kPointerSize)); | 367 MemOperand(sp, (param_count-1-i) * kPointerSize)); |
| 368 } | 368 } |
| 369 ExternalReference miss = descriptor->miss_handler(); | 369 ExternalReference miss = descriptor->miss_handler(); |
| 370 __ CallExternalReference(miss, param_count); | 370 __ CallExternalReference(miss, param_count); |
| (...skipping 267 matching lines...) |
| 638 __ li(scratch_, Operand(non_smi_exponent)); | 638 __ li(scratch_, Operand(non_smi_exponent)); |
| 639 // Set the sign bit in scratch_ if the value was negative. | 639 // Set the sign bit in scratch_ if the value was negative. |
| 640 __ or_(scratch_, scratch_, sign_); | 640 __ or_(scratch_, scratch_, sign_); |
| 641 // Subtract from 0 if the value was negative. | 641 // Subtract from 0 if the value was negative. |
| 642 __ subu(at, zero_reg, the_int_); | 642 __ subu(at, zero_reg, the_int_); |
| 643 __ Movn(the_int_, at, sign_); | 643 __ Movn(the_int_, at, sign_); |
| 644 // We should be masking the implicit first digit of the mantissa away here, | 644 // We should be masking the implicit first digit of the mantissa away here, |
| 645 // but it just ends up combining harmlessly with the last digit of the | 645 // but it just ends up combining harmlessly with the last digit of the |
| 646 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 646 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
| 647 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | 647 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. |
| 648 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 648 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
| 649 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 649 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 650 __ srl(at, the_int_, shift_distance); | 650 __ srl(at, the_int_, shift_distance); |
| 651 __ or_(scratch_, scratch_, at); | 651 __ or_(scratch_, scratch_, at); |
| 652 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 652 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 653 HeapNumber::kExponentOffset)); | 653 HeapNumber::kExponentOffset)); |
| 654 __ sll(scratch_, the_int_, 32 - shift_distance); | 654 __ sll(scratch_, the_int_, 32 - shift_distance); |
| 655 __ Ret(USE_DELAY_SLOT); | 655 __ Ret(USE_DELAY_SLOT); |
| 656 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 656 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 657 HeapNumber::kMantissaOffset)); | 657 HeapNumber::kMantissaOffset)); |
| 658 | 658 |
| (...skipping 40 matching lines...) |
| 699 // Comparing JS objects with <=, >= is complicated. | 699 // Comparing JS objects with <=, >= is complicated. |
| 700 if (cc != eq) { | 700 if (cc != eq) { |
| 701 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 701 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 702 // Normally here we fall through to return_equal, but undefined is | 702 // Normally here we fall through to return_equal, but undefined is |
| 703 // special: (undefined == undefined) == true, but | 703 // special: (undefined == undefined) == true, but |
| 704 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 704 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 705 if (cc == less_equal || cc == greater_equal) { | 705 if (cc == less_equal || cc == greater_equal) { |
| 706 __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE)); | 706 __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE)); |
| 707 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); | 707 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); |
| 708 __ Branch(&return_equal, ne, a0, Operand(a6)); | 708 __ Branch(&return_equal, ne, a0, Operand(a6)); |
| 709 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 709 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 710 __ Ret(USE_DELAY_SLOT); | 710 __ Ret(USE_DELAY_SLOT); |
| 711 if (cc == le) { | 711 if (cc == le) { |
| 712 // undefined <= undefined should fail. | 712 // undefined <= undefined should fail. |
| 713 __ li(v0, Operand(GREATER)); | 713 __ li(v0, Operand(GREATER)); |
| 714 } else { | 714 } else { |
| 715 // undefined >= undefined should fail. | 715 // undefined >= undefined should fail. |
| 716 __ li(v0, Operand(LESS)); | 716 __ li(v0, Operand(LESS)); |
| 717 } | 717 } |
| 718 } | 718 } |
| 719 } | 719 } |
| 720 } | 720 } |
| 721 | 721 |
| 722 __ bind(&return_equal); | 722 __ bind(&return_equal); |
| 723 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 723 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 724 __ Ret(USE_DELAY_SLOT); | 724 __ Ret(USE_DELAY_SLOT); |
| 725 if (cc == less) { | 725 if (cc == less) { |
| 726 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | 726 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
| 727 } else if (cc == greater) { | 727 } else if (cc == greater) { |
| 728 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | 728 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
| 729 } else { | 729 } else { |
| 730 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. | 730 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
| 731 } | 731 } |
| 732 // For less and greater we don't have to check for NaN since the result of | 732 // For less and greater we don't have to check for NaN since the result of |
| 733 // x < x is false regardless. For the others here is some code to check | 733 // x < x is false regardless. For the others here is some code to check |
| (...skipping 17 matching lines...) |
| 751 // Or with all low-bits of mantissa. | 751 // Or with all low-bits of mantissa. |
| 752 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 752 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 753 __ Or(v0, a7, Operand(a6)); | 753 __ Or(v0, a7, Operand(a6)); |
| 754 // For equal we already have the right value in v0: Return zero (equal) | 754 // For equal we already have the right value in v0: Return zero (equal) |
| 755 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 755 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 756 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 756 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
| 757 // value if it's a NaN. | 757 // value if it's a NaN. |
| 758 if (cc != eq) { | 758 if (cc != eq) { |
| 759 // All-zero means Infinity means equal. | 759 // All-zero means Infinity means equal. |
| 760 __ Ret(eq, v0, Operand(zero_reg)); | 760 __ Ret(eq, v0, Operand(zero_reg)); |
| 761 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 761 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 762 __ Ret(USE_DELAY_SLOT); | 762 __ Ret(USE_DELAY_SLOT); |
| 763 if (cc == le) { | 763 if (cc == le) { |
| 764 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. | 764 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. |
| 765 } else { | 765 } else { |
| 766 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. | 766 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. |
| 767 } | 767 } |
| 768 } | 768 } |
| 769 } | 769 } |
| 770 // No fall through here. | 770 // No fall through here. |
| 771 | 771 |
| 772 __ bind(&not_identical); | 772 __ bind(&not_identical); |
| 773 } | 773 } |
| 774 | 774 |
| 775 | 775 |
| 776 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 776 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 777 Register lhs, | 777 Register lhs, |
| 778 Register rhs, | 778 Register rhs, |
| 779 Label* both_loaded_as_doubles, | 779 Label* both_loaded_as_doubles, |
| 780 Label* slow, | 780 Label* slow, |
| 781 bool strict) { | 781 bool strict) { |
| 782 ASSERT((lhs.is(a0) && rhs.is(a1)) || | 782 DCHECK((lhs.is(a0) && rhs.is(a1)) || |
| 783 (lhs.is(a1) && rhs.is(a0))); | 783 (lhs.is(a1) && rhs.is(a0))); |
| 784 | 784 |
| 785 Label lhs_is_smi; | 785 Label lhs_is_smi; |
| 786 __ JumpIfSmi(lhs, &lhs_is_smi); | 786 __ JumpIfSmi(lhs, &lhs_is_smi); |
| 787 // Rhs is a Smi. | 787 // Rhs is a Smi. |
| 788 // Check whether the non-smi is a heap number. | 788 // Check whether the non-smi is a heap number. |
| 789 __ GetObjectType(lhs, t0, t0); | 789 __ GetObjectType(lhs, t0, t0); |
| 790 if (strict) { | 790 if (strict) { |
| 791 // If lhs was not a number and rhs was a Smi then strict equality cannot | 791 // If lhs was not a number and rhs was a Smi then strict equality cannot |
| 792 // succeed. Return non-equal (lhs is already not zero). | 792 // succeed. Return non-equal (lhs is already not zero). |
| (...skipping 96 matching lines...) |
| 889 __ jmp(both_loaded_as_doubles); | 889 __ jmp(both_loaded_as_doubles); |
| 890 } | 890 } |
| 891 | 891 |
| 892 | 892 |
| 893 // Fast negative check for internalized-to-internalized equality. | 893 // Fast negative check for internalized-to-internalized equality. |
| 894 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 894 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 895 Register lhs, | 895 Register lhs, |
| 896 Register rhs, | 896 Register rhs, |
| 897 Label* possible_strings, | 897 Label* possible_strings, |
| 898 Label* not_both_strings) { | 898 Label* not_both_strings) { |
| 899 ASSERT((lhs.is(a0) && rhs.is(a1)) || | 899 DCHECK((lhs.is(a0) && rhs.is(a1)) || |
| 900 (lhs.is(a1) && rhs.is(a0))); | 900 (lhs.is(a1) && rhs.is(a0))); |
| 901 | 901 |
| 902 // a2 is object type of rhs. | 902 // a2 is object type of rhs. |
| 903 Label object_test; | 903 Label object_test; |
| 904 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 904 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| 905 __ And(at, a2, Operand(kIsNotStringMask)); | 905 __ And(at, a2, Operand(kIsNotStringMask)); |
| 906 __ Branch(&object_test, ne, at, Operand(zero_reg)); | 906 __ Branch(&object_test, ne, at, Operand(zero_reg)); |
| 907 __ And(at, a2, Operand(kIsNotInternalizedMask)); | 907 __ And(at, a2, Operand(kIsNotInternalizedMask)); |
| 908 __ Branch(possible_strings, ne, at, Operand(zero_reg)); | 908 __ Branch(possible_strings, ne, at, Operand(zero_reg)); |
| 909 __ GetObjectType(rhs, a3, a3); | 909 __ GetObjectType(rhs, a3, a3); |
| (...skipping 71 matching lines...) |
| 981 // NOTICE! This code is only reached after a smi-fast-case check, so | 981 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 982 // it is certain that at least one operand isn't a smi. | 982 // it is certain that at least one operand isn't a smi. |
| 983 | 983 |
| 984 // Handle the case where the objects are identical. Either returns the answer | 984 // Handle the case where the objects are identical. Either returns the answer |
| 985 // or goes to slow. Only falls through if the objects were not identical. | 985 // or goes to slow. Only falls through if the objects were not identical. |
| 986 EmitIdenticalObjectComparison(masm, &slow, cc); | 986 EmitIdenticalObjectComparison(masm, &slow, cc); |
| 987 | 987 |
| 988 // If either is a Smi (we know that not both are), then they can only | 988 // If either is a Smi (we know that not both are), then they can only |
| 989 // be strictly equal if the other is a HeapNumber. | 989 // be strictly equal if the other is a HeapNumber. |
| 990 STATIC_ASSERT(kSmiTag == 0); | 990 STATIC_ASSERT(kSmiTag == 0); |
| 991 ASSERT_EQ(0, Smi::FromInt(0)); | 991 DCHECK_EQ(0, Smi::FromInt(0)); |
| 992 __ And(a6, lhs, Operand(rhs)); | 992 __ And(a6, lhs, Operand(rhs)); |
| 993 __ JumpIfNotSmi(a6, &not_smis, a4); | 993 __ JumpIfNotSmi(a6, &not_smis, a4); |
| 994 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 994 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| 995 // 1) Return the answer. | 995 // 1) Return the answer. |
| 996 // 2) Go to slow. | 996 // 2) Go to slow. |
| 997 // 3) Fall through to both_loaded_as_doubles. | 997 // 3) Fall through to both_loaded_as_doubles. |
| 998 // 4) Jump to rhs_not_nan. | 998 // 4) Jump to rhs_not_nan. |
| 999 // In cases 3 and 4 we have found out we were dealing with a number-number | 999 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1000 // comparison and the numbers have been loaded into f12 and f14 as doubles, | 1000 // comparison and the numbers have been loaded into f12 and f14 as doubles, |
| 1001 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. | 1001 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
| (...skipping 35 matching lines...) |
| 1037 __ mov(v0, a6); // Return EQUAL as result. | 1037 __ mov(v0, a6); // Return EQUAL as result. |
| 1038 | 1038 |
| 1039 __ mov(v0, a5); // Return GREATER as result. | 1039 __ mov(v0, a5); // Return GREATER as result. |
| 1040 __ bind(&skip); | 1040 __ bind(&skip); |
| 1041 } | 1041 } |
| 1042 __ Ret(); | 1042 __ Ret(); |
| 1043 | 1043 |
| 1044 __ bind(&nan); | 1044 __ bind(&nan); |
| 1045 // NaN comparisons always fail. | 1045 // NaN comparisons always fail. |
| 1046 // Load whatever we need in v0 to make the comparison fail. | 1046 // Load whatever we need in v0 to make the comparison fail. |
| 1047 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 1047 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 1048 __ Ret(USE_DELAY_SLOT); | 1048 __ Ret(USE_DELAY_SLOT); |
| 1049 if (cc == lt || cc == le) { | 1049 if (cc == lt || cc == le) { |
| 1050 __ li(v0, Operand(GREATER)); | 1050 __ li(v0, Operand(GREATER)); |
| 1051 } else { | 1051 } else { |
| 1052 __ li(v0, Operand(LESS)); | 1052 __ li(v0, Operand(LESS)); |
| 1053 } | 1053 } |
| 1054 | 1054 |
| 1055 | 1055 |
| 1056 __ bind(&not_smis); | 1056 __ bind(&not_smis); |
| 1057 // At this point we know we are dealing with two different objects, | 1057 // At this point we know we are dealing with two different objects, |
| (...skipping 61 matching lines...) |
| 1119 // Figure out which native to call and setup the arguments. | 1119 // Figure out which native to call and setup the arguments. |
| 1120 Builtins::JavaScript native; | 1120 Builtins::JavaScript native; |
| 1121 if (cc == eq) { | 1121 if (cc == eq) { |
| 1122 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 1122 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1123 } else { | 1123 } else { |
| 1124 native = Builtins::COMPARE; | 1124 native = Builtins::COMPARE; |
| 1125 int ncr; // NaN compare result. | 1125 int ncr; // NaN compare result. |
| 1126 if (cc == lt || cc == le) { | 1126 if (cc == lt || cc == le) { |
| 1127 ncr = GREATER; | 1127 ncr = GREATER; |
| 1128 } else { | 1128 } else { |
| 1129 ASSERT(cc == gt || cc == ge); // Remaining cases. | 1129 DCHECK(cc == gt || cc == ge); // Remaining cases. |
| 1130 ncr = LESS; | 1130 ncr = LESS; |
| 1131 } | 1131 } |
| 1132 __ li(a0, Operand(Smi::FromInt(ncr))); | 1132 __ li(a0, Operand(Smi::FromInt(ncr))); |
| 1133 __ push(a0); | 1133 __ push(a0); |
| 1134 } | 1134 } |
| 1135 | 1135 |
| 1136 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1136 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1137 // tagged as a small integer. | 1137 // tagged as a small integer. |
| 1138 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1138 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 1139 | 1139 |
| (...skipping 230 matching lines...) |
| 1370 __ bind(&call_runtime); | 1370 __ bind(&call_runtime); |
| 1371 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); | 1371 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); |
| 1372 | 1372 |
| 1373 // The stub is called from non-optimized code, which expects the result | 1373 // The stub is called from non-optimized code, which expects the result |
| 1374 // as heap number in exponent. | 1374 // as heap number in exponent. |
| 1375 __ bind(&done); | 1375 __ bind(&done); |
| 1376 __ AllocateHeapNumber( | 1376 __ AllocateHeapNumber( |
| 1377 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); | 1377 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); |
| 1378 __ sdc1(double_result, | 1378 __ sdc1(double_result, |
| 1379 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 1379 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 1380 ASSERT(heapnumber.is(v0)); | 1380 DCHECK(heapnumber.is(v0)); |
| 1381 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | 1381 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); |
| 1382 __ DropAndRet(2); | 1382 __ DropAndRet(2); |
| 1383 } else { | 1383 } else { |
| 1384 __ push(ra); | 1384 __ push(ra); |
| 1385 { | 1385 { |
| 1386 AllowExternalCallThatCantCauseGC scope(masm); | 1386 AllowExternalCallThatCantCauseGC scope(masm); |
| 1387 __ PrepareCallCFunction(0, 2, scratch); | 1387 __ PrepareCallCFunction(0, 2, scratch); |
| 1388 __ MovToFloatParameters(double_base, double_exponent); | 1388 __ MovToFloatParameters(double_base, double_exponent); |
| 1389 __ CallCFunction( | 1389 __ CallCFunction( |
| 1390 ExternalReference::power_double_double_function(isolate()), | 1390 ExternalReference::power_double_double_function(isolate()), |
| (...skipping 126 matching lines...) |
| 1517 masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. | 1517 masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. |
| 1518 // Stack space reservation moved to the branch delay slot below. | 1518 // Stack space reservation moved to the branch delay slot below. |
| 1519 // Stack is still aligned. | 1519 // Stack is still aligned. |
| 1520 | 1520 |
| 1521 // Call the C routine. | 1521 // Call the C routine. |
| 1522 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. | 1522 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. |
| 1523 masm->jalr(t9); | 1523 masm->jalr(t9); |
| 1524 // Set up sp in the delay slot. | 1524 // Set up sp in the delay slot. |
| 1525 masm->daddiu(sp, sp, -kCArgsSlotsSize); | 1525 masm->daddiu(sp, sp, -kCArgsSlotsSize); |
| 1526 // Make sure the stored 'ra' points to this position. | 1526 // Make sure the stored 'ra' points to this position. |
| 1527 ASSERT_EQ(kNumInstructionsToJump, | 1527 DCHECK_EQ(kNumInstructionsToJump, |
| 1528 masm->InstructionsGeneratedSince(&find_ra)); | 1528 masm->InstructionsGeneratedSince(&find_ra)); |
| 1529 } | 1529 } |
| 1530 | 1530 |
| 1531 // Runtime functions should not return 'the hole'. Allowing it to escape may | 1531 // Runtime functions should not return 'the hole'. Allowing it to escape may |
| 1532 // lead to crashes in the IC code later. | 1532 // lead to crashes in the IC code later. |
| 1533 if (FLAG_debug_code) { | 1533 if (FLAG_debug_code) { |
| 1534 Label okay; | 1534 Label okay; |
| 1535 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); | 1535 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
| 1536 __ Branch(&okay, ne, v0, Operand(a4)); | 1536 __ Branch(&okay, ne, v0, Operand(a4)); |
| 1537 __ stop("The hole escaped"); | 1537 __ stop("The hole escaped"); |
| (...skipping 234 matching lines...) |
| 1772 // Uses registers a0 to a4. | 1772 // Uses registers a0 to a4. |
| 1773 // Expected input (depending on whether args are in registers or on the stack): | 1773 // Expected input (depending on whether args are in registers or on the stack): |
| 1774 // * object: a0 or at sp + 1 * kPointerSize. | 1774 // * object: a0 or at sp + 1 * kPointerSize. |
| 1775 // * function: a1 or at sp. | 1775 // * function: a1 or at sp. |
| 1776 // | 1776 // |
| 1777 // An inlined call site may have been generated before calling this stub. | 1777 // An inlined call site may have been generated before calling this stub. |
| 1778 // In this case the offset to the inline site to patch is passed on the stack, | 1778 // In this case the offset to the inline site to patch is passed on the stack, |
| 1779 // in the safepoint slot for register a4. | 1779 // in the safepoint slot for register a4. |
| 1780 void InstanceofStub::Generate(MacroAssembler* masm) { | 1780 void InstanceofStub::Generate(MacroAssembler* masm) { |
| 1781 // Call site inlining and patching implies arguments in registers. | 1781 // Call site inlining and patching implies arguments in registers. |
| 1782 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 1782 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
| 1783 // ReturnTrueFalse is only implemented for inlined call sites. | 1783 // ReturnTrueFalse is only implemented for inlined call sites. |
| 1784 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); | 1784 DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
| 1785 | 1785 |
| 1786 // Fixed register usage throughout the stub: | 1786 // Fixed register usage throughout the stub: |
| 1787 const Register object = a0; // Object (lhs). | 1787 const Register object = a0; // Object (lhs). |
| 1788 Register map = a3; // Map of the object. | 1788 Register map = a3; // Map of the object. |
| 1789 const Register function = a1; // Function (rhs). | 1789 const Register function = a1; // Function (rhs). |
| 1790 const Register prototype = a4; // Prototype of the function. | 1790 const Register prototype = a4; // Prototype of the function. |
| 1791 const Register inline_site = t1; | 1791 const Register inline_site = t1; |
| 1792 const Register scratch = a2; | 1792 const Register scratch = a2; |
| 1793 | 1793 |
| 1794 const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize; | 1794 const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize; |
| (...skipping 29 matching lines...) |
| 1824 // Check that the function prototype is a JS object. | 1824 // Check that the function prototype is a JS object. |
| 1825 __ JumpIfSmi(prototype, &slow); | 1825 __ JumpIfSmi(prototype, &slow); |
| 1826 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 1826 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
| 1827 | 1827 |
| 1828 // Update the global instanceof or call site inlined cache with the current | 1828 // Update the global instanceof or call site inlined cache with the current |
| 1829 // map and function. The cached answer will be set when it is known below. | 1829 // map and function. The cached answer will be set when it is known below. |
| 1830 if (!HasCallSiteInlineCheck()) { | 1830 if (!HasCallSiteInlineCheck()) { |
| 1831 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1831 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 1832 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 1832 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 1833 } else { | 1833 } else { |
| 1834 ASSERT(HasArgsInRegisters()); | 1834 DCHECK(HasArgsInRegisters()); |
| 1835 // Patch the (relocated) inlined map check. | 1835 // Patch the (relocated) inlined map check. |
| 1836 | 1836 |
| 1837 // The offset was stored in a4 safepoint slot. | 1837 // The offset was stored in a4 safepoint slot. |
| 1838 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 1838 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). |
| 1839 __ LoadFromSafepointRegisterSlot(scratch, a4); | 1839 __ LoadFromSafepointRegisterSlot(scratch, a4); |
| 1840 __ Dsubu(inline_site, ra, scratch); | 1840 __ Dsubu(inline_site, ra, scratch); |
| 1841 // Get the map location in scratch and patch it. | 1841 // Get the map location in scratch and patch it. |
| 1842 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. | 1842 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. |
| 1843 __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset)); | 1843 __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset)); |
| 1844 } | 1844 } |
| 1845 | 1845 |
| 1846 // Register mapping: a3 is object map and a4 is function prototype. | 1846 // Register mapping: a3 is object map and a4 is function prototype. |
| 1847 // Get prototype of object into a2. | 1847 // Get prototype of object into a2. |
| 1848 __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 1848 __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 1849 | 1849 |
| 1850 // We don't need map any more. Use it as a scratch register. | 1850 // We don't need map any more. Use it as a scratch register. |
| 1851 Register scratch2 = map; | 1851 Register scratch2 = map; |
| 1852 map = no_reg; | 1852 map = no_reg; |
| 1853 | 1853 |
| 1854 // Loop through the prototype chain looking for the function prototype. | 1854 // Loop through the prototype chain looking for the function prototype. |
| 1855 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 1855 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
| 1856 __ bind(&loop); | 1856 __ bind(&loop); |
| 1857 __ Branch(&is_instance, eq, scratch, Operand(prototype)); | 1857 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
| 1858 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); | 1858 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); |
| 1859 __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1859 __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 1860 __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 1860 __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 1861 __ Branch(&loop); | 1861 __ Branch(&loop); |
| 1862 | 1862 |
| 1863 __ bind(&is_instance); | 1863 __ bind(&is_instance); |
| 1864 ASSERT(Smi::FromInt(0) == 0); | 1864 DCHECK(Smi::FromInt(0) == 0); |
| 1865 if (!HasCallSiteInlineCheck()) { | 1865 if (!HasCallSiteInlineCheck()) { |
| 1866 __ mov(v0, zero_reg); | 1866 __ mov(v0, zero_reg); |
| 1867 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1867 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 1868 } else { | 1868 } else { |
| 1869 // Patch the call site to return true. | 1869 // Patch the call site to return true. |
| 1870 __ LoadRoot(v0, Heap::kTrueValueRootIndex); | 1870 __ LoadRoot(v0, Heap::kTrueValueRootIndex); |
| 1871 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 1871 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 1872 // Get the boolean result location in scratch and patch it. | 1872 // Get the boolean result location in scratch and patch it. |
| 1873 __ PatchRelocatedValue(inline_site, scratch, v0); | 1873 __ PatchRelocatedValue(inline_site, scratch, v0); |
| 1874 | 1874 |
| 1875 if (!ReturnTrueFalseObject()) { | 1875 if (!ReturnTrueFalseObject()) { |
| 1876 ASSERT_EQ(Smi::FromInt(0), 0); | 1876 DCHECK_EQ(Smi::FromInt(0), 0); |
| 1877 __ mov(v0, zero_reg); | 1877 __ mov(v0, zero_reg); |
| 1878 } | 1878 } |
| 1879 } | 1879 } |
| 1880 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 1880 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 1881 | 1881 |
| 1882 __ bind(&is_not_instance); | 1882 __ bind(&is_not_instance); |
| 1883 if (!HasCallSiteInlineCheck()) { | 1883 if (!HasCallSiteInlineCheck()) { |
| 1884 __ li(v0, Operand(Smi::FromInt(1))); | 1884 __ li(v0, Operand(Smi::FromInt(1))); |
| 1885 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1885 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 1886 } else { | 1886 } else { |
| (...skipping 199 matching lines...) |
| 2086 __ bind(&skip_min); | 2086 __ bind(&skip_min); |
| 2087 | 2087 |
| 2088 __ bind(&try_allocate); | 2088 __ bind(&try_allocate); |
| 2089 | 2089 |
| 2090 // Compute the sizes of backing store, parameter map, and arguments object. | 2090 // Compute the sizes of backing store, parameter map, and arguments object. |
| 2091 // 1. Parameter map, has 2 extra words containing context and backing store. | 2091 // 1. Parameter map, has 2 extra words containing context and backing store. |
| 2092 const int kParameterMapHeaderSize = | 2092 const int kParameterMapHeaderSize = |
| 2093 FixedArray::kHeaderSize + 2 * kPointerSize; | 2093 FixedArray::kHeaderSize + 2 * kPointerSize; |
| 2094 // If there are no mapped parameters, we do not need the parameter_map. | 2094 // If there are no mapped parameters, we do not need the parameter_map. |
| 2095 Label param_map_size; | 2095 Label param_map_size; |
| 2096 ASSERT_EQ(0, Smi::FromInt(0)); | 2096 DCHECK_EQ(0, Smi::FromInt(0)); |
| 2097 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg)); | 2097 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg)); |
| 2098 __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0. | 2098 __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0. |
| 2099 __ SmiScale(t1, a1, kPointerSizeLog2); | 2099 __ SmiScale(t1, a1, kPointerSizeLog2); |
| 2100 __ daddiu(t1, t1, kParameterMapHeaderSize); | 2100 __ daddiu(t1, t1, kParameterMapHeaderSize); |
| 2101 __ bind(&param_map_size); | 2101 __ bind(&param_map_size); |
| 2102 | 2102 |
| 2103 // 2. Backing store. | 2103 // 2. Backing store. |
| 2104 __ SmiScale(t2, a2, kPointerSizeLog2); | 2104 __ SmiScale(t2, a2, kPointerSizeLog2); |
| 2105 __ Daddu(t1, t1, Operand(t2)); | 2105 __ Daddu(t1, t1, Operand(t2)); |
| 2106 __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize)); | 2106 __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize)); |
| (...skipping 483 matching lines...) |
| 2590 __ daddu(a6, a0, a2); | 2590 __ daddu(a6, a0, a2); |
| 2591 | 2591 |
| 2592 // Argument 6: Set the number of capture registers to zero to force global | 2592 // Argument 6: Set the number of capture registers to zero to force global |
| 2593 // regexps to behave as non-global. This does not affect non-global regexps. | 2593 // regexps to behave as non-global. This does not affect non-global regexps. |
| 2594 __ mov(a5, zero_reg); | 2594 __ mov(a5, zero_reg); |
| 2595 | 2595 |
| 2596 // Argument 5: static offsets vector buffer. | 2596 // Argument 5: static offsets vector buffer. |
| 2597 __ li(a4, Operand( | 2597 __ li(a4, Operand( |
| 2598 ExternalReference::address_of_static_offsets_vector(isolate()))); | 2598 ExternalReference::address_of_static_offsets_vector(isolate()))); |
| 2599 } else { // O32. | 2599 } else { // O32. |
| 2600 ASSERT(kMipsAbi == kO32); | 2600 DCHECK(kMipsAbi == kO32); |
| 2601 | 2601 |
| 2602 // Argument 9: Pass current isolate address. | 2602 // Argument 9: Pass current isolate address. |
| 2603 // CFunctionArgumentOperand handles MIPS stack argument slots. | 2603 // CFunctionArgumentOperand handles MIPS stack argument slots. |
| 2604 __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); | 2604 __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); |
| 2605 __ sd(a0, MemOperand(sp, 5 * kPointerSize)); | 2605 __ sd(a0, MemOperand(sp, 5 * kPointerSize)); |
| 2606 | 2606 |
| 2607 // Argument 8: Indicate that this is a direct call from JavaScript. | 2607 // Argument 8: Indicate that this is a direct call from JavaScript. |
| 2608 __ li(a0, Operand(1)); | 2608 __ li(a0, Operand(1)); |
| 2609 __ sd(a0, MemOperand(sp, 4 * kPointerSize)); | 2609 __ sd(a0, MemOperand(sp, 4 * kPointerSize)); |
| 2610 | 2610 |
| (...skipping 230 matching lines...) |
| 2841 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 2841 static void GenerateRecordCallTarget(MacroAssembler* masm) { |
| 2842 // Cache the called function in a feedback vector slot. Cache states | 2842 // Cache the called function in a feedback vector slot. Cache states |
| 2843 // are uninitialized, monomorphic (indicated by a JSFunction), and | 2843 // are uninitialized, monomorphic (indicated by a JSFunction), and |
| 2844 // megamorphic. | 2844 // megamorphic. |
| 2845 // a0 : number of arguments to the construct function | 2845 // a0 : number of arguments to the construct function |
| 2846 // a1 : the function to call | 2846 // a1 : the function to call |
| 2847 // a2 : Feedback vector | 2847 // a2 : Feedback vector |
| 2848 // a3 : slot in feedback vector (Smi) | 2848 // a3 : slot in feedback vector (Smi) |
| 2849 Label initialize, done, miss, megamorphic, not_array_function; | 2849 Label initialize, done, miss, megamorphic, not_array_function; |
| 2850 | 2850 |
| 2851 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 2851 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), |
| 2852 masm->isolate()->heap()->megamorphic_symbol()); | 2852 masm->isolate()->heap()->megamorphic_symbol()); |
| 2853 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 2853 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), |
| 2854 masm->isolate()->heap()->uninitialized_symbol()); | 2854 masm->isolate()->heap()->uninitialized_symbol()); |
| 2855 | 2855 |
| 2856 // Load the cache state into a4. | 2856 // Load the cache state into a4. |
| 2857 __ dsrl(a4, a3, 32 - kPointerSizeLog2); | 2857 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
| 2858 __ Daddu(a4, a2, Operand(a4)); | 2858 __ Daddu(a4, a2, Operand(a4)); |
| 2859 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); | 2859 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); |
| 2860 | 2860 |
| 2861 // A monomorphic cache hit or an already megamorphic state: invoke the | 2861 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 2862 // function without changing the state. | 2862 // function without changing the state. |
| 2863 __ Branch(&done, eq, a4, Operand(a1)); | 2863 __ Branch(&done, eq, a4, Operand(a1)); |
| (...skipping 254 matching lines...) |
| 3118 } | 3118 } |
| 3119 | 3119 |
| 3120 | 3120 |
| 3121 // StringCharCodeAtGenerator. | 3121 // StringCharCodeAtGenerator. |
| 3122 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 3122 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 3123 Label flat_string; | 3123 Label flat_string; |
| 3124 Label ascii_string; | 3124 Label ascii_string; |
| 3125 Label got_char_code; | 3125 Label got_char_code; |
| 3126 Label sliced_string; | 3126 Label sliced_string; |
| 3127 | 3127 |
| 3128 ASSERT(!a4.is(index_)); | 3128 DCHECK(!a4.is(index_)); |
| 3129 ASSERT(!a4.is(result_)); | 3129 DCHECK(!a4.is(result_)); |
| 3130 ASSERT(!a4.is(object_)); | 3130 DCHECK(!a4.is(object_)); |
| 3131 | 3131 |
| 3132 // If the receiver is a smi trigger the non-string case. | 3132 // If the receiver is a smi trigger the non-string case. |
| 3133 __ JumpIfSmi(object_, receiver_not_string_); | 3133 __ JumpIfSmi(object_, receiver_not_string_); |
| 3134 | 3134 |
| 3135 // Fetch the instance type of the receiver into result register. | 3135 // Fetch the instance type of the receiver into result register. |
| 3136 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3136 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3137 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3137 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3138 // If the receiver is not a string trigger the non-string case. | 3138 // If the receiver is not a string trigger the non-string case. |
| 3139 __ And(a4, result_, Operand(kIsNotStringMask)); | 3139 __ And(a4, result_, Operand(kIsNotStringMask)); |
| 3140 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); | 3140 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); |
| (...skipping 178 matching lines...) |
| 3319 result_, | 3319 result_, |
| 3320 Heap::kHeapNumberMapRootIndex, | 3320 Heap::kHeapNumberMapRootIndex, |
| 3321 index_not_number_, | 3321 index_not_number_, |
| 3322 DONT_DO_SMI_CHECK); | 3322 DONT_DO_SMI_CHECK); |
| 3323 call_helper.BeforeCall(masm); | 3323 call_helper.BeforeCall(masm); |
| 3324 // Consumed by runtime conversion function: | 3324 // Consumed by runtime conversion function: |
| 3325 __ Push(object_, index_); | 3325 __ Push(object_, index_); |
| 3326 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 3326 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| 3327 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 3327 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| 3328 } else { | 3328 } else { |
| 3329 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 3329 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| 3330 // NumberToSmi discards numbers that are not exact integers. | 3330 // NumberToSmi discards numbers that are not exact integers. |
| 3331 __ CallRuntime(Runtime::kNumberToSmi, 1); | 3331 __ CallRuntime(Runtime::kNumberToSmi, 1); |
| 3332 } | 3332 } |
| 3333 | 3333 |
| 3334 // Save the conversion result before the pop instructions below | 3334 // Save the conversion result before the pop instructions below |
| 3335 // have a chance to overwrite it. | 3335 // have a chance to overwrite it. |
| 3336 | 3336 |
| 3337 __ Move(index_, v0); | 3337 __ Move(index_, v0); |
| 3338 __ pop(object_); | 3338 __ pop(object_); |
| 3339 // Reload the instance type. | 3339 // Reload the instance type. |
| (...skipping 22 matching lines...) |
| 3362 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 3362 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); |
| 3363 } | 3363 } |
| 3364 | 3364 |
| 3365 | 3365 |
| 3366 // ------------------------------------------------------------------------- | 3366 // ------------------------------------------------------------------------- |
| 3367 // StringCharFromCodeGenerator | 3367 // StringCharFromCodeGenerator |
| 3368 | 3368 |
| 3369 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 3369 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| 3370 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 3370 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| 3371 | 3371 |
| 3372 ASSERT(!a4.is(result_)); | 3372 DCHECK(!a4.is(result_)); |
| 3373 ASSERT(!a4.is(code_)); | 3373 DCHECK(!a4.is(code_)); |
| 3374 | 3374 |
| 3375 STATIC_ASSERT(kSmiTag == 0); | 3375 STATIC_ASSERT(kSmiTag == 0); |
| 3376 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 3376 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
| 3377 __ And(a4, | 3377 __ And(a4, |
| 3378 code_, | 3378 code_, |
| 3379 Operand(kSmiTagMask | | 3379 Operand(kSmiTagMask | |
| 3380 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 3380 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
| 3381 __ Branch(&slow_case_, ne, a4, Operand(zero_reg)); | 3381 __ Branch(&slow_case_, ne, a4, Operand(zero_reg)); |
| 3382 | 3382 |
| 3383 | 3383 |
| 3384 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 3384 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 3385 // At this point code register contains smi tagged ASCII char code. | 3385 // At this point code register contains smi tagged ASCII char code. |
| 3386 STATIC_ASSERT(kSmiTag == 0); | 3386 STATIC_ASSERT(kSmiTag == 0); |
| (...skipping 364 matching lines...) |
| 3751 __ bind(&strings_not_equal); | 3751 __ bind(&strings_not_equal); |
| 3752 // Cannot put li in the delay slot; it expands to multiple instructions. | 3752 // Cannot put li in the delay slot; it expands to multiple instructions. |
| 3753 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); | 3753 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
| 3754 __ Ret(); | 3754 __ Ret(); |
| 3755 | 3755 |
| 3756 // Check if the length is zero. | 3756 // Check if the length is zero. |
| 3757 Label compare_chars; | 3757 Label compare_chars; |
| 3758 __ bind(&check_zero_length); | 3758 __ bind(&check_zero_length); |
| 3759 STATIC_ASSERT(kSmiTag == 0); | 3759 STATIC_ASSERT(kSmiTag == 0); |
| 3760 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); | 3760 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); |
| 3761 ASSERT(is_int16((intptr_t)Smi::FromInt(EQUAL))); | 3761 DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL))); |
| 3762 __ Ret(USE_DELAY_SLOT); | 3762 __ Ret(USE_DELAY_SLOT); |
| 3763 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 3763 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 3764 | 3764 |
| 3765 // Compare characters. | 3765 // Compare characters. |
| 3766 __ bind(&compare_chars); | 3766 __ bind(&compare_chars); |
| 3767 | 3767 |
| 3768 GenerateAsciiCharsCompareLoop(masm, | 3768 GenerateAsciiCharsCompareLoop(masm, |
| 3769 left, right, length, scratch2, scratch3, v0, | 3769 left, right, length, scratch2, scratch3, v0, |
| 3770 &strings_not_equal); | 3770 &strings_not_equal); |
| 3771 | 3771 |
| (...skipping 22 matching lines...) |
| 3794 STATIC_ASSERT(kSmiTag == 0); | 3794 STATIC_ASSERT(kSmiTag == 0); |
| 3795 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); | 3795 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
| 3796 | 3796 |
| 3797 // Compare loop. | 3797 // Compare loop. |
| 3798 GenerateAsciiCharsCompareLoop(masm, | 3798 GenerateAsciiCharsCompareLoop(masm, |
| 3799 left, right, min_length, scratch2, scratch4, v0, | 3799 left, right, min_length, scratch2, scratch4, v0, |
| 3800 &result_not_equal); | 3800 &result_not_equal); |
| 3801 | 3801 |
| 3802 // Compare lengths - strings up to min-length are equal. | 3802 // Compare lengths - strings up to min-length are equal. |
| 3803 __ bind(&compare_lengths); | 3803 __ bind(&compare_lengths); |
| 3804 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 3804 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| 3805 // Use length_delta as result if it's zero. | 3805 // Use length_delta as result if it's zero. |
| 3806 __ mov(scratch2, length_delta); | 3806 __ mov(scratch2, length_delta); |
| 3807 __ mov(scratch4, zero_reg); | 3807 __ mov(scratch4, zero_reg); |
| 3808 __ mov(v0, zero_reg); | 3808 __ mov(v0, zero_reg); |
| 3809 | 3809 |
| 3810 __ bind(&result_not_equal); | 3810 __ bind(&result_not_equal); |
| 3811 // Conditionally update the result based either on length_delta or | 3811 // Conditionally update the result based either on length_delta or |
| 3812 // the last comparison performed in the loop above. | 3812 // the last comparison performed in the loop above. |
| 3813 Label ret; | 3813 Label ret; |
| 3814 __ Branch(&ret, eq, scratch2, Operand(scratch4)); | 3814 __ Branch(&ret, eq, scratch2, Operand(scratch4)); |
| (...skipping 95 matching lines...) |
| 3910 } | 3910 } |
| 3911 | 3911 |
| 3912 // Tail call into the stub that handles binary operations with allocation | 3912 // Tail call into the stub that handles binary operations with allocation |
| 3913 // sites. | 3913 // sites. |
| 3914 BinaryOpWithAllocationSiteStub stub(isolate(), state_); | 3914 BinaryOpWithAllocationSiteStub stub(isolate(), state_); |
| 3915 __ TailCallStub(&stub); | 3915 __ TailCallStub(&stub); |
| 3916 } | 3916 } |
| 3917 | 3917 |
| 3918 | 3918 |
| 3919 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 3919 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 3920 ASSERT(state_ == CompareIC::SMI); | 3920 DCHECK(state_ == CompareIC::SMI); |
| 3921 Label miss; | 3921 Label miss; |
| 3922 __ Or(a2, a1, a0); | 3922 __ Or(a2, a1, a0); |
| 3923 __ JumpIfNotSmi(a2, &miss); | 3923 __ JumpIfNotSmi(a2, &miss); |
| 3924 | 3924 |
| 3925 if (GetCondition() == eq) { | 3925 if (GetCondition() == eq) { |
| 3926 // For equality we do not care about the sign of the result. | 3926 // For equality we do not care about the sign of the result. |
| 3927 __ Ret(USE_DELAY_SLOT); | 3927 __ Ret(USE_DELAY_SLOT); |
| 3928 __ Dsubu(v0, a0, a1); | 3928 __ Dsubu(v0, a0, a1); |
| 3929 } else { | 3929 } else { |
| 3930 // Untag before subtracting to avoid handling overflow. | 3930 // Untag before subtracting to avoid handling overflow. |
| 3931 __ SmiUntag(a1); | 3931 __ SmiUntag(a1); |
| 3932 __ SmiUntag(a0); | 3932 __ SmiUntag(a0); |
| 3933 __ Ret(USE_DELAY_SLOT); | 3933 __ Ret(USE_DELAY_SLOT); |
| 3934 __ Dsubu(v0, a1, a0); | 3934 __ Dsubu(v0, a1, a0); |
| 3935 } | 3935 } |
| 3936 | 3936 |
| 3937 __ bind(&miss); | 3937 __ bind(&miss); |
| 3938 GenerateMiss(masm); | 3938 GenerateMiss(masm); |
| 3939 } | 3939 } |
| 3940 | 3940 |
| 3941 | 3941 |
| 3942 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 3942 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| 3943 ASSERT(state_ == CompareIC::NUMBER); | 3943 DCHECK(state_ == CompareIC::NUMBER); |
| 3944 | 3944 |
| 3945 Label generic_stub; | 3945 Label generic_stub; |
| 3946 Label unordered, maybe_undefined1, maybe_undefined2; | 3946 Label unordered, maybe_undefined1, maybe_undefined2; |
| 3947 Label miss; | 3947 Label miss; |
| 3948 | 3948 |
| 3949 if (left_ == CompareIC::SMI) { | 3949 if (left_ == CompareIC::SMI) { |
| 3950 __ JumpIfNotSmi(a1, &miss); | 3950 __ JumpIfNotSmi(a1, &miss); |
| 3951 } | 3951 } |
| 3952 if (right_ == CompareIC::SMI) { | 3952 if (right_ == CompareIC::SMI) { |
| 3953 __ JumpIfNotSmi(a0, &miss); | 3953 __ JumpIfNotSmi(a0, &miss); |
| (...skipping 32 matching lines...) |
| 3986 | 3986 |
| 3987 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 3987 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
| 3988 Label fpu_eq, fpu_lt; | 3988 Label fpu_eq, fpu_lt; |
| 3989 // Test if equal, and also handle the unordered/NaN case. | 3989 // Test if equal, and also handle the unordered/NaN case. |
| 3990 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); | 3990 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
| 3991 | 3991 |
| 3992 // Test if less (unordered case is already handled). | 3992 // Test if less (unordered case is already handled). |
| 3993 __ BranchF(&fpu_lt, NULL, lt, f0, f2); | 3993 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
| 3994 | 3994 |
| 3995 // Otherwise it's greater, so just fall thru, and return. | 3995 // Otherwise it's greater, so just fall thru, and return. |
| 3996 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); | 3996 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); |
| 3997 __ Ret(USE_DELAY_SLOT); | 3997 __ Ret(USE_DELAY_SLOT); |
| 3998 __ li(v0, Operand(GREATER)); | 3998 __ li(v0, Operand(GREATER)); |
| 3999 | 3999 |
| 4000 __ bind(&fpu_eq); | 4000 __ bind(&fpu_eq); |
| 4001 __ Ret(USE_DELAY_SLOT); | 4001 __ Ret(USE_DELAY_SLOT); |
| 4002 __ li(v0, Operand(EQUAL)); | 4002 __ li(v0, Operand(EQUAL)); |
| 4003 | 4003 |
| 4004 __ bind(&fpu_lt); | 4004 __ bind(&fpu_lt); |
| 4005 __ Ret(USE_DELAY_SLOT); | 4005 __ Ret(USE_DELAY_SLOT); |
| 4006 __ li(v0, Operand(LESS)); | 4006 __ li(v0, Operand(LESS)); |
| (...skipping 19 matching lines...) |
| 4026 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4026 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4027 __ Branch(&unordered, eq, a1, Operand(at)); | 4027 __ Branch(&unordered, eq, a1, Operand(at)); |
| 4028 } | 4028 } |
| 4029 | 4029 |
| 4030 __ bind(&miss); | 4030 __ bind(&miss); |
| 4031 GenerateMiss(masm); | 4031 GenerateMiss(masm); |
| 4032 } | 4032 } |
| 4033 | 4033 |
| 4034 | 4034 |
| 4035 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 4035 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
| 4036 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | 4036 DCHECK(state_ == CompareIC::INTERNALIZED_STRING); |
| 4037 Label miss; | 4037 Label miss; |
| 4038 | 4038 |
| 4039 // Registers containing left and right operands respectively. | 4039 // Registers containing left and right operands respectively. |
| 4040 Register left = a1; | 4040 Register left = a1; |
| 4041 Register right = a0; | 4041 Register right = a0; |
| 4042 Register tmp1 = a2; | 4042 Register tmp1 = a2; |
| 4043 Register tmp2 = a3; | 4043 Register tmp2 = a3; |
| 4044 | 4044 |
| 4045 // Check that both operands are heap objects. | 4045 // Check that both operands are heap objects. |
| 4046 __ JumpIfEitherSmi(left, right, &miss); | 4046 __ JumpIfEitherSmi(left, right, &miss); |
| 4047 | 4047 |
| 4048 // Check that both operands are internalized strings. | 4048 // Check that both operands are internalized strings. |
| 4049 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4049 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 4050 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4050 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 4051 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4051 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 4052 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4052 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 4053 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 4053 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| 4054 __ Or(tmp1, tmp1, Operand(tmp2)); | 4054 __ Or(tmp1, tmp1, Operand(tmp2)); |
| 4055 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 4055 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
| 4056 __ Branch(&miss, ne, at, Operand(zero_reg)); | 4056 __ Branch(&miss, ne, at, Operand(zero_reg)); |
| 4057 | 4057 |
| 4058 // Make sure a0 is non-zero. At this point input operands are | 4058 // Make sure a0 is non-zero. At this point input operands are |
| 4059 // guaranteed to be non-zero. | 4059 // guaranteed to be non-zero. |
| 4060 ASSERT(right.is(a0)); | 4060 DCHECK(right.is(a0)); |
| 4061 STATIC_ASSERT(EQUAL == 0); | 4061 STATIC_ASSERT(EQUAL == 0); |
| 4062 STATIC_ASSERT(kSmiTag == 0); | 4062 STATIC_ASSERT(kSmiTag == 0); |
| 4063 __ mov(v0, right); | 4063 __ mov(v0, right); |
| 4064 // Internalized strings are compared by identity. | 4064 // Internalized strings are compared by identity. |
| 4065 __ Ret(ne, left, Operand(right)); | 4065 __ Ret(ne, left, Operand(right)); |
| 4066 ASSERT(is_int16(EQUAL)); | 4066 DCHECK(is_int16(EQUAL)); |
| 4067 __ Ret(USE_DELAY_SLOT); | 4067 __ Ret(USE_DELAY_SLOT); |
| 4068 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 4068 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 4069 | 4069 |
| 4070 __ bind(&miss); | 4070 __ bind(&miss); |
| 4071 GenerateMiss(masm); | 4071 GenerateMiss(masm); |
| 4072 } | 4072 } |
| 4073 | 4073 |
| 4074 | 4074 |
| 4075 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 4075 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
| 4076 ASSERT(state_ == CompareIC::UNIQUE_NAME); | 4076 DCHECK(state_ == CompareIC::UNIQUE_NAME); |
| 4077 ASSERT(GetCondition() == eq); | 4077 DCHECK(GetCondition() == eq); |
| 4078 Label miss; | 4078 Label miss; |
| 4079 | 4079 |
| 4080 // Registers containing left and right operands respectively. | 4080 // Registers containing left and right operands respectively. |
| 4081 Register left = a1; | 4081 Register left = a1; |
| 4082 Register right = a0; | 4082 Register right = a0; |
| 4083 Register tmp1 = a2; | 4083 Register tmp1 = a2; |
| 4084 Register tmp2 = a3; | 4084 Register tmp2 = a3; |
| 4085 | 4085 |
| 4086 // Check that both operands are heap objects. | 4086 // Check that both operands are heap objects. |
| 4087 __ JumpIfEitherSmi(left, right, &miss); | 4087 __ JumpIfEitherSmi(left, right, &miss); |
| 4088 | 4088 |
| 4089 // Check that both operands are unique names. This leaves the instance | 4089 // Check that both operands are unique names. This leaves the instance |
| 4090 // types loaded in tmp1 and tmp2. | 4090 // types loaded in tmp1 and tmp2. |
| 4091 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4091 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 4092 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4092 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 4093 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4093 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 4094 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4094 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 4095 | 4095 |
| 4096 __ JumpIfNotUniqueName(tmp1, &miss); | 4096 __ JumpIfNotUniqueName(tmp1, &miss); |
| 4097 __ JumpIfNotUniqueName(tmp2, &miss); | 4097 __ JumpIfNotUniqueName(tmp2, &miss); |
| 4098 | 4098 |
| 4099 // Use a0 as result | 4099 // Use a0 as result |
| 4100 __ mov(v0, a0); | 4100 __ mov(v0, a0); |
| 4101 | 4101 |
| 4102 // Unique names are compared by identity. | 4102 // Unique names are compared by identity. |
| 4103 Label done; | 4103 Label done; |
| 4104 __ Branch(&done, ne, left, Operand(right)); | 4104 __ Branch(&done, ne, left, Operand(right)); |
| 4105 // Make sure a0 is non-zero. At this point input operands are | 4105 // Make sure a0 is non-zero. At this point input operands are |
| 4106 // guaranteed to be non-zero. | 4106 // guaranteed to be non-zero. |
| 4107 ASSERT(right.is(a0)); | 4107 DCHECK(right.is(a0)); |
| 4108 STATIC_ASSERT(EQUAL == 0); | 4108 STATIC_ASSERT(EQUAL == 0); |
| 4109 STATIC_ASSERT(kSmiTag == 0); | 4109 STATIC_ASSERT(kSmiTag == 0); |
| 4110 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 4110 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 4111 __ bind(&done); | 4111 __ bind(&done); |
| 4112 __ Ret(); | 4112 __ Ret(); |
| 4113 | 4113 |
| 4114 __ bind(&miss); | 4114 __ bind(&miss); |
| 4115 GenerateMiss(masm); | 4115 GenerateMiss(masm); |
| 4116 } | 4116 } |
| 4117 | 4117 |
| 4118 | 4118 |
| 4119 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 4119 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 4120 ASSERT(state_ == CompareIC::STRING); | 4120 DCHECK(state_ == CompareIC::STRING); |
| 4121 Label miss; | 4121 Label miss; |
| 4122 | 4122 |
| 4123 bool equality = Token::IsEqualityOp(op_); | 4123 bool equality = Token::IsEqualityOp(op_); |
| 4124 | 4124 |
| 4125 // Registers containing left and right operands respectively. | 4125 // Registers containing left and right operands respectively. |
| 4126 Register left = a1; | 4126 Register left = a1; |
| 4127 Register right = a0; | 4127 Register right = a0; |
| 4128 Register tmp1 = a2; | 4128 Register tmp1 = a2; |
| 4129 Register tmp2 = a3; | 4129 Register tmp2 = a3; |
| 4130 Register tmp3 = a4; | 4130 Register tmp3 = a4; |
| (...skipping 22 matching lines...) |
| 4153 __ Ret(USE_DELAY_SLOT); | 4153 __ Ret(USE_DELAY_SLOT); |
| 4154 __ mov(v0, zero_reg); // In the delay slot. | 4154 __ mov(v0, zero_reg); // In the delay slot. |
| 4155 __ bind(&left_ne_right); | 4155 __ bind(&left_ne_right); |
| 4156 | 4156 |
| 4157 // Handle not identical strings. | 4157 // Handle not identical strings. |
| 4158 | 4158 |
| 4159 // Check that both strings are internalized strings. If they are, we're done | 4159 // Check that both strings are internalized strings. If they are, we're done |
| 4160 // because we already know they are not identical. We know they are both | 4160 // because we already know they are not identical. We know they are both |
| 4161 // strings. | 4161 // strings. |
| 4162 if (equality) { | 4162 if (equality) { |
| 4163 ASSERT(GetCondition() == eq); | 4163 DCHECK(GetCondition() == eq); |
| 4164 STATIC_ASSERT(kInternalizedTag == 0); | 4164 STATIC_ASSERT(kInternalizedTag == 0); |
| 4165 __ Or(tmp3, tmp1, Operand(tmp2)); | 4165 __ Or(tmp3, tmp1, Operand(tmp2)); |
| 4166 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); | 4166 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); |
| 4167 Label is_symbol; | 4167 Label is_symbol; |
| 4168 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); | 4168 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); |
| 4169 // Make sure a0 is non-zero. At this point input operands are | 4169 // Make sure a0 is non-zero. At this point input operands are |
| 4170 // guaranteed to be non-zero. | 4170 // guaranteed to be non-zero. |
| 4171 ASSERT(right.is(a0)); | 4171 DCHECK(right.is(a0)); |
| 4172 __ Ret(USE_DELAY_SLOT); | 4172 __ Ret(USE_DELAY_SLOT); |
| 4173 __ mov(v0, a0); // In the delay slot. | 4173 __ mov(v0, a0); // In the delay slot. |
| 4174 __ bind(&is_symbol); | 4174 __ bind(&is_symbol); |
| 4175 } | 4175 } |
| 4176 | 4176 |
| 4177 // Check that both strings are sequential ASCII. | 4177 // Check that both strings are sequential ASCII. |
| 4178 Label runtime; | 4178 Label runtime; |
| 4179 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | 4179 __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 4180 tmp1, tmp2, tmp3, tmp4, &runtime); | 4180 tmp1, tmp2, tmp3, tmp4, &runtime); |
| 4181 | 4181 |
| (...skipping 14 matching lines...) |
| 4196 } else { | 4196 } else { |
| 4197 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 4197 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 4198 } | 4198 } |
| 4199 | 4199 |
| 4200 __ bind(&miss); | 4200 __ bind(&miss); |
| 4201 GenerateMiss(masm); | 4201 GenerateMiss(masm); |
| 4202 } | 4202 } |
| 4203 | 4203 |
| 4204 | 4204 |
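The internalized fast path above is sound because internalization (interning) guarantees at most one string object per distinct content, so two internalized strings at different addresses must also differ in content. A tiny sketch of that invariant, with `std::unordered_set` standing in for V8's string table:

```cpp
#include <cassert>
#include <string>
#include <unordered_set>

// Interning: equal contents always yield the same stored object.
const std::string* Internalize(std::unordered_set<std::string>& table,
                               const std::string& s) {
  return &*table.insert(s).first;
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* a = Internalize(table, "foo");
  const std::string* b = Internalize(table, "foo");
  assert(a == b);  // identity comparison decides equality
}
```

This is why, once both operands are known to be internalized and not identical, the stub can return a not-equal result without ever looking at the characters.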
| 4205 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 4205 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 4206 ASSERT(state_ == CompareIC::OBJECT); | 4206 DCHECK(state_ == CompareIC::OBJECT); |
| 4207 Label miss; | 4207 Label miss; |
| 4208 __ And(a2, a1, Operand(a0)); | 4208 __ And(a2, a1, Operand(a0)); |
| 4209 __ JumpIfSmi(a2, &miss); | 4209 __ JumpIfSmi(a2, &miss); |
| 4210 | 4210 |
| 4211 __ GetObjectType(a0, a2, a2); | 4211 __ GetObjectType(a0, a2, a2); |
| 4212 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4212 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 4213 __ GetObjectType(a1, a2, a2); | 4213 __ GetObjectType(a1, a2, a2); |
| 4214 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4214 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 4215 | 4215 |
| 4216 ASSERT(GetCondition() == eq); | 4216 DCHECK(GetCondition() == eq); |
| 4217 __ Ret(USE_DELAY_SLOT); | 4217 __ Ret(USE_DELAY_SLOT); |
| 4218 __ dsubu(v0, a0, a1); | 4218 __ dsubu(v0, a0, a1); |
| 4219 | 4219 |
| 4220 __ bind(&miss); | 4220 __ bind(&miss); |
| 4221 GenerateMiss(masm); | 4221 GenerateMiss(masm); |
| 4222 } | 4222 } |
| 4223 | 4223 |
| 4224 | 4224 |
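Note what `Ret(USE_DELAY_SLOT)` followed by `dsubu(v0, a0, a1)` computes: MIPS executes the instruction after a jump in its branch delay slot, so the subtraction still runs before control returns, leaving `right - left` in v0. That difference is zero exactly when the two pointers are identical, which matches the compare IC's encoding of EQUAL. A sketch of the returned value:

```cpp
#include <cstdint>

// right - left: zero iff the operands are the same object. Unsigned
// subtraction wraps, so two distinct 64-bit addresses never produce zero.
intptr_t IdentityCompareResult(uintptr_t left, uintptr_t right) {
  return static_cast<intptr_t>(right - left);
}
```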
| 4225 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 4225 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
| 4226 Label miss; | 4226 Label miss; |
| (...skipping 68 matching lines...) |
| 4295 } | 4295 } |
| 4296 | 4296 |
| 4297 | 4297 |
| 4298 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 4298 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
| 4299 Label* miss, | 4299 Label* miss, |
| 4300 Label* done, | 4300 Label* done, |
| 4301 Register receiver, | 4301 Register receiver, |
| 4302 Register properties, | 4302 Register properties, |
| 4303 Handle<Name> name, | 4303 Handle<Name> name, |
| 4304 Register scratch0) { | 4304 Register scratch0) { |
| 4305 ASSERT(name->IsUniqueName()); | 4305 DCHECK(name->IsUniqueName()); |
| 4306 // If the names in the slots probed for this hash value (probes 1 through | 4306 // If the names in the slots probed for this hash value (probes 1 through |
| 4307 // kProbes - 1) all differ from the given name, and the kProbes-th slot is | 4307 // kProbes - 1) all differ from the given name, and the kProbes-th slot is |
| 4308 // unused (its name is the undefined value), then the hash table is | 4308 // unused (its name is the undefined value), then the hash table is |
| 4309 // guaranteed not to contain the property. This holds even if some slots | 4309 // guaranteed not to contain the property. This holds even if some slots |
| 4310 // hold deleted properties (their names are the hole value). | 4310 // hold deleted properties (their names are the hole value). |
| 4311 for (int i = 0; i < kInlinedProbes; i++) { | 4311 for (int i = 0; i < kInlinedProbes; i++) { |
| 4312 // scratch0 points to properties hash. | 4312 // scratch0 points to properties hash. |
| 4313 // Compute the masked index: (hash + i + i * i) & mask. | 4313 // Compute the masked index: (hash + i + i * i) & mask. |
| 4314 Register index = scratch0; | 4314 Register index = scratch0; |
| 4315 // Capacity is smi 2^n. | 4315 // Capacity is smi 2^n. |
| 4316 __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset)); | 4316 __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset)); |
| 4317 __ Dsubu(index, index, Operand(1)); | 4317 __ Dsubu(index, index, Operand(1)); |
| 4318 __ And(index, index, | 4318 __ And(index, index, |
| 4319 Operand(name->Hash() + NameDictionary::GetProbeOffset(i))); | 4319 Operand(name->Hash() + NameDictionary::GetProbeOffset(i))); |
| 4320 | 4320 |
| 4321 // Scale the index by multiplying by the entry size. | 4321 // Scale the index by multiplying by the entry size. |
| 4322 ASSERT(NameDictionary::kEntrySize == 3); | 4322 DCHECK(NameDictionary::kEntrySize == 3); |
| 4323 __ dsll(at, index, 1); | 4323 __ dsll(at, index, 1); |
| 4324 __ Daddu(index, index, at); // index *= 3. | 4324 __ Daddu(index, index, at); // index *= 3. |
| 4325 | 4325 |
| 4326 Register entity_name = scratch0; | 4326 Register entity_name = scratch0; |
| 4327 // Finding undefined at this slot means the name is not contained. | 4327 // Finding undefined at this slot means the name is not contained. |
| 4328 ASSERT_EQ(kSmiTagSize, 1); | 4328 DCHECK_EQ(kSmiTagSize, 1); |
| 4329 Register tmp = properties; | 4329 Register tmp = properties; |
| 4330 | 4330 |
| 4331 __ dsll(scratch0, index, kPointerSizeLog2); | 4331 __ dsll(scratch0, index, kPointerSizeLog2); |
| 4332 __ Daddu(tmp, properties, scratch0); | 4332 __ Daddu(tmp, properties, scratch0); |
| 4333 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 4333 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 4334 | 4334 |
| 4335 ASSERT(!tmp.is(entity_name)); | 4335 DCHECK(!tmp.is(entity_name)); |
| 4336 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 4336 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
| 4337 __ Branch(done, eq, entity_name, Operand(tmp)); | 4337 __ Branch(done, eq, entity_name, Operand(tmp)); |
| 4338 | 4338 |
| 4339 // Load the hole ready for use below: | 4339 // Load the hole ready for use below: |
| 4340 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 4340 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
| 4341 | 4341 |
| 4342 // Stop if the property is found. | 4342 // Stop if the property is found. |
| 4343 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); | 4343 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); |
| 4344 | 4344 |
| 4345 Label good; | 4345 Label good; |
| (...skipping 32 matching lines...) |
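The inlined negative lookup above relies on the open-addressing invariant spelled out in the comment before the loop: probing stops at the first `undefined` slot, and reaching one without seeing the name proves absence. A compact sketch of the idea, assuming a power-of-two capacity and triangular probe offsets (the stub's exact offsets come from `NameDictionary::GetProbeOffset(i)`):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Each slot holds a name pointer; nullptr plays the role of 'undefined'.
struct Slot { const std::string* name; };

// Returns true when 'name' is guaranteed absent within 'probes' probes.
bool NegativeLookup(const std::vector<Slot>& table, uint32_t hash,
                    const std::string& name, int probes) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // 2^n - 1
  for (int i = 0; i < probes; ++i) {
    const Slot& slot = table[(hash + i * (i + 1) / 2) & mask];
    if (slot.name == nullptr) return true;  // undefined: not contained
    if (*slot.name == name) return false;   // found the property
    // A deleted entry (the hole) matches neither case; keep probing.
  }
  return false;  // inconclusive: fall back to the out-of-line probe loop
}
```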
| 4378 // |done| label if a property with the given name is found. Jump to | 4378 // |done| label if a property with the given name is found. Jump to |
| 4379 // the |miss| label otherwise. | 4379 // the |miss| label otherwise. |
| 4380 // If the lookup was successful, |scratch2| will be equal to elements + 4 * index. | 4380 // If the lookup was successful, |scratch2| will be equal to elements + 4 * index. |
| 4381 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 4381 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
| 4382 Label* miss, | 4382 Label* miss, |
| 4383 Label* done, | 4383 Label* done, |
| 4384 Register elements, | 4384 Register elements, |
| 4385 Register name, | 4385 Register name, |
| 4386 Register scratch1, | 4386 Register scratch1, |
| 4387 Register scratch2) { | 4387 Register scratch2) { |
| 4388 ASSERT(!elements.is(scratch1)); | 4388 DCHECK(!elements.is(scratch1)); |
| 4389 ASSERT(!elements.is(scratch2)); | 4389 DCHECK(!elements.is(scratch2)); |
| 4390 ASSERT(!name.is(scratch1)); | 4390 DCHECK(!name.is(scratch1)); |
| 4391 ASSERT(!name.is(scratch2)); | 4391 DCHECK(!name.is(scratch2)); |
| 4392 | 4392 |
| 4393 __ AssertName(name); | 4393 __ AssertName(name); |
| 4394 | 4394 |
| 4395 // Compute the capacity mask. | 4395 // Compute the capacity mask. |
| 4396 __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 4396 __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| 4397 __ SmiUntag(scratch1); | 4397 __ SmiUntag(scratch1); |
| 4398 __ Dsubu(scratch1, scratch1, Operand(1)); | 4398 __ Dsubu(scratch1, scratch1, Operand(1)); |
| 4399 | 4399 |
| 4400 // Generate an unrolled loop that performs a few probes before | 4400 // Generate an unrolled loop that performs a few probes before |
| 4401 // giving up. Measurements done on Gmail indicate that 2 probes | 4401 // giving up. Measurements done on Gmail indicate that 2 probes |
| 4402 // cover ~93% of loads from dictionaries. | 4402 // cover ~93% of loads from dictionaries. |
| 4403 for (int i = 0; i < kInlinedProbes; i++) { | 4403 for (int i = 0; i < kInlinedProbes; i++) { |
| 4404 // Compute the masked index: (hash + i + i * i) & mask. | 4404 // Compute the masked index: (hash + i + i * i) & mask. |
| 4405 __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 4405 __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 4406 if (i > 0) { | 4406 if (i > 0) { |
| 4407 // Add the probe offset (i + i * i) left shifted, so the hash does not | 4407 // Add the probe offset (i + i * i) left shifted, so the hash does not |
| 4408 // have to be right shifted in a separate instruction; hash + i + i * i | 4408 // have to be right shifted in a separate instruction; hash + i + i * i |
| 4409 // is right shifted and masked by the dsrl/And instructions that follow. | 4409 // is right shifted and masked by the dsrl/And instructions that follow. |
| 4410 ASSERT(NameDictionary::GetProbeOffset(i) < | 4410 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4411 1 << (32 - Name::kHashFieldOffset)); | 4411 1 << (32 - Name::kHashFieldOffset)); |
| 4412 __ Daddu(scratch2, scratch2, Operand( | 4412 __ Daddu(scratch2, scratch2, Operand( |
| 4413 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4413 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 4414 } | 4414 } |
| 4415 __ dsrl(scratch2, scratch2, Name::kHashShift); | 4415 __ dsrl(scratch2, scratch2, Name::kHashShift); |
| 4416 __ And(scratch2, scratch1, scratch2); | 4416 __ And(scratch2, scratch1, scratch2); |
| 4417 | 4417 |
| 4418 // Scale the index by multiplying by the element size. | 4418 // Scale the index by multiplying by the element size. |
| 4419 ASSERT(NameDictionary::kEntrySize == 3); | 4419 DCHECK(NameDictionary::kEntrySize == 3); |
| 4420 // scratch2 = scratch2 * 3. | 4420 // scratch2 = scratch2 * 3. |
| 4421 | 4421 |
| 4422 __ dsll(at, scratch2, 1); | 4422 __ dsll(at, scratch2, 1); |
| 4423 __ Daddu(scratch2, scratch2, at); | 4423 __ Daddu(scratch2, scratch2, at); |
| 4424 | 4424 |
| 4425 // Check if the key is identical to the name. | 4425 // Check if the key is identical to the name. |
| 4426 __ dsll(at, scratch2, kPointerSizeLog2); | 4426 __ dsll(at, scratch2, kPointerSizeLog2); |
| 4427 __ Daddu(scratch2, elements, at); | 4427 __ Daddu(scratch2, elements, at); |
| 4428 __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset)); | 4428 __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset)); |
| 4429 __ Branch(done, eq, name, Operand(at)); | 4429 __ Branch(done, eq, name, Operand(at)); |
| 4430 } | 4430 } |
| 4431 | 4431 |
| 4432 const int spill_mask = | 4432 const int spill_mask = |
| 4433 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | | 4433 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | |
| 4434 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & | 4434 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & |
| 4435 ~(scratch1.bit() | scratch2.bit()); | 4435 ~(scratch1.bit() | scratch2.bit()); |
| 4436 | 4436 |
| 4437 __ MultiPush(spill_mask); | 4437 __ MultiPush(spill_mask); |
| 4438 if (name.is(a0)) { | 4438 if (name.is(a0)) { |
| 4439 ASSERT(!elements.is(a1)); | 4439 DCHECK(!elements.is(a1)); |
| 4440 __ Move(a1, name); | 4440 __ Move(a1, name); |
| 4441 __ Move(a0, elements); | 4441 __ Move(a0, elements); |
| 4442 } else { | 4442 } else { |
| 4443 __ Move(a0, elements); | 4443 __ Move(a0, elements); |
| 4444 __ Move(a1, name); | 4444 __ Move(a1, name); |
| 4445 } | 4445 } |
| 4446 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 4446 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); |
| 4447 __ CallStub(&stub); | 4447 __ CallStub(&stub); |
| 4448 __ mov(scratch2, a2); | 4448 __ mov(scratch2, a2); |
| 4449 __ mov(at, v0); | 4449 __ mov(at, v0); |
| (...skipping 35 matching lines...) |
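Two strength-reduction tricks recur in these probe loops: the index is scaled by the entry size of 3 with a shift and an add (`dsll`/`Daddu`) instead of a multiply, and the probe offset is added to the still-shifted hash field so that a single `dsrl` serves both terms. Hedged sketches of both (helper names are illustrative):

```cpp
#include <cstdint>

// index * 3 computed as (index << 1) + index, mirroring the dsll/Daddu pair.
inline uint64_t TimesEntrySize(uint64_t index) {
  return (index << 1) + index;
}

// Fold the probe offset into the unshifted hash field: adding
// (offset << shift) first means one right shift covers both values.
inline uint32_t ProbeSlot(uint32_t hash_field, uint32_t offset,
                          uint32_t mask, int shift) {
  return ((hash_field + (offset << shift)) >> shift) & mask;
}
```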
| 4485 | 4485 |
| 4486 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 4486 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 4487 | 4487 |
| 4488 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 4488 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 4489 // Compute the masked index: (hash + i + i * i) & mask. | 4489 // Compute the masked index: (hash + i + i * i) & mask. |
| 4490 // Capacity is smi 2^n. | 4490 // Capacity is smi 2^n. |
| 4491 if (i > 0) { | 4491 if (i > 0) { |
| 4492 // Add the probe offset (i + i * i) left shifted, so the hash does not | 4492 // Add the probe offset (i + i * i) left shifted, so the hash does not |
| 4493 // have to be right shifted in a separate instruction; hash + i + i * i | 4493 // have to be right shifted in a separate instruction; hash + i + i * i |
| 4494 // is right shifted and masked by the dsrl/And instructions that follow. | 4494 // is right shifted and masked by the dsrl/And instructions that follow. |
| 4495 ASSERT(NameDictionary::GetProbeOffset(i) < | 4495 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4496 1 << (32 - Name::kHashFieldOffset)); | 4496 1 << (32 - Name::kHashFieldOffset)); |
| 4497 __ Daddu(index, hash, Operand( | 4497 __ Daddu(index, hash, Operand( |
| 4498 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4498 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 4499 } else { | 4499 } else { |
| 4500 __ mov(index, hash); | 4500 __ mov(index, hash); |
| 4501 } | 4501 } |
| 4502 __ dsrl(index, index, Name::kHashShift); | 4502 __ dsrl(index, index, Name::kHashShift); |
| 4503 __ And(index, mask, index); | 4503 __ And(index, mask, index); |
| 4504 | 4504 |
| 4505 // Scale the index by multiplying by the entry size. | 4505 // Scale the index by multiplying by the entry size. |
| 4506 ASSERT(NameDictionary::kEntrySize == 3); | 4506 DCHECK(NameDictionary::kEntrySize == 3); |
| 4507 // index *= 3. | 4507 // index *= 3. |
| 4508 __ mov(at, index); | 4508 __ mov(at, index); |
| 4509 __ dsll(index, index, 1); | 4509 __ dsll(index, index, 1); |
| 4510 __ Daddu(index, index, at); | 4510 __ Daddu(index, index, at); |
| 4511 | 4511 |
| 4512 | 4512 |
| 4513 ASSERT_EQ(kSmiTagSize, 1); | 4513 DCHECK_EQ(kSmiTagSize, 1); |
| 4514 __ dsll(index, index, kPointerSizeLog2); | 4514 __ dsll(index, index, kPointerSizeLog2); |
| 4515 __ Daddu(index, index, dictionary); | 4515 __ Daddu(index, index, dictionary); |
| 4516 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4516 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 4517 | 4517 |
| 4518 // Finding undefined at this slot means the name is not contained. | 4518 // Finding undefined at this slot means the name is not contained. |
| 4519 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); | 4519 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
| 4520 | 4520 |
| 4521 // Stop if the property is found. | 4521 // Stop if the property is found. |
| 4522 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); | 4522 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
| 4523 | 4523 |
| (...skipping 116 matching lines...) |
| 4640 __ Ret(); | 4640 __ Ret(); |
| 4641 } | 4641 } |
| 4642 | 4642 |
| 4643 | 4643 |
| 4644 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 4644 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { |
| 4645 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 4645 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
| 4646 int argument_count = 3; | 4646 int argument_count = 3; |
| 4647 __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 4647 __ PrepareCallCFunction(argument_count, regs_.scratch0()); |
| 4648 Register address = | 4648 Register address = |
| 4649 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 4649 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
| 4650 ASSERT(!address.is(regs_.object())); | 4650 DCHECK(!address.is(regs_.object())); |
| 4651 ASSERT(!address.is(a0)); | 4651 DCHECK(!address.is(a0)); |
| 4652 __ Move(address, regs_.address()); | 4652 __ Move(address, regs_.address()); |
| 4653 __ Move(a0, regs_.object()); | 4653 __ Move(a0, regs_.object()); |
| 4654 __ Move(a1, address); | 4654 __ Move(a1, address); |
| 4655 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); | 4655 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); |
| 4656 | 4656 |
| 4657 AllowExternalCallThatCantCauseGC scope(masm); | 4657 AllowExternalCallThatCantCauseGC scope(masm); |
| 4658 __ CallCFunction( | 4658 __ CallCFunction( |
| 4659 ExternalReference::incremental_marking_record_write_function(isolate()), | 4659 ExternalReference::incremental_marking_record_write_function(isolate()), |
| 4660 argument_count); | 4660 argument_count); |
| 4661 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | 4661 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); |
| (...skipping 200 matching lines...) |
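For the external call above, the stub marshals (object, slot address, isolate) into a0-a2, taking care not to clobber the slot address when it already happens to live in a0. A plausible C-side shape, assumed purely for illustration (the real declaration belongs to the incremental marker, not this file):

```cpp
#include <cstdio>

// Hypothetical stand-in for the record-write C fallback; the argument
// order matches the a0/a1/a2 moves in the stub above.
extern "C" void IncrementalMarkingRecordWrite(void* object, void** slot,
                                              void* isolate) {
  std::printf("record write: object=%p slot=%p isolate=%p\n",
              object, static_cast<void*>(slot), isolate);
}
```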
| 4862 __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); | 4862 __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); |
| 4863 | 4863 |
| 4864 // The caller's return address is above the saved temporaries. | 4864 // The caller's return address is above the saved temporaries. |
| 4865 // Grab that for the second argument to the hook. | 4865 // Grab that for the second argument to the hook. |
| 4866 __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); | 4866 __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); |
| 4867 | 4867 |
| 4868 // Align the stack if necessary. | 4868 // Align the stack if necessary. |
| 4869 int frame_alignment = masm->ActivationFrameAlignment(); | 4869 int frame_alignment = masm->ActivationFrameAlignment(); |
| 4870 if (frame_alignment > kPointerSize) { | 4870 if (frame_alignment > kPointerSize) { |
| 4871 __ mov(s5, sp); | 4871 __ mov(s5, sp); |
| 4872 ASSERT(IsPowerOf2(frame_alignment)); | 4872 DCHECK(IsPowerOf2(frame_alignment)); |
| 4873 __ And(sp, sp, Operand(-frame_alignment)); | 4873 __ And(sp, sp, Operand(-frame_alignment)); |
| 4874 } | 4874 } |
| 4875 | 4875 |
| 4876 __ Dsubu(sp, sp, kCArgsSlotsSize); | 4876 __ Dsubu(sp, sp, kCArgsSlotsSize); |
| 4877 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) | 4877 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) |
| 4878 int64_t entry_hook = | 4878 int64_t entry_hook = |
| 4879 reinterpret_cast<int64_t>(isolate()->function_entry_hook()); | 4879 reinterpret_cast<int64_t>(isolate()->function_entry_hook()); |
| 4880 __ li(t9, Operand(entry_hook)); | 4880 __ li(t9, Operand(entry_hook)); |
| 4881 #else | 4881 #else |
| 4882 // Under the simulator we need to indirect the entry hook through a | 4882 // Under the simulator we need to indirect the entry hook through a |
| (...skipping 47 matching lines...) |
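The stack alignment step above (`And(sp, sp, Operand(-frame_alignment))`) works because, for a power-of-two alignment, `-alignment` in two's complement is a mask with the low log2(alignment) bits cleared; the `DCHECK(IsPowerOf2(...))` guards exactly that precondition. A one-line sketch:

```cpp
#include <cstdint>

// Round sp down to a power-of-two boundary: 0 - alignment == ~(alignment - 1).
uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  return sp & (0 - alignment);
}
```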
| 4930 | 4930 |
| 4931 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 4931 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, |
| 4932 AllocationSiteOverrideMode mode) { | 4932 AllocationSiteOverrideMode mode) { |
| 4933 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 4933 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) |
| 4934 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES) | 4934 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES) |
| 4935 // a0 - number of arguments | 4935 // a0 - number of arguments |
| 4936 // a1 - constructor? | 4936 // a1 - constructor? |
| 4937 // sp[0] - last argument | 4937 // sp[0] - last argument |
| 4938 Label normal_sequence; | 4938 Label normal_sequence; |
| 4939 if (mode == DONT_OVERRIDE) { | 4939 if (mode == DONT_OVERRIDE) { |
| 4940 ASSERT(FAST_SMI_ELEMENTS == 0); | 4940 DCHECK(FAST_SMI_ELEMENTS == 0); |
| 4941 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 4941 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 4942 ASSERT(FAST_ELEMENTS == 2); | 4942 DCHECK(FAST_ELEMENTS == 2); |
| 4943 ASSERT(FAST_HOLEY_ELEMENTS == 3); | 4943 DCHECK(FAST_HOLEY_ELEMENTS == 3); |
| 4944 ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 4944 DCHECK(FAST_DOUBLE_ELEMENTS == 4); |
| 4945 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 4945 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
| 4946 | 4946 |
| 4947 // Is the low bit set? If so, we are holey and that is good. | 4947 // Is the low bit set? If so, we are holey and that is good. |
| 4948 __ And(at, a3, Operand(1)); | 4948 __ And(at, a3, Operand(1)); |
| 4949 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); | 4949 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); |
| 4950 } | 4950 } |
| 4951 // Look at the first argument. | 4951 // Look at the first argument. |
| 4952 __ ld(a5, MemOperand(sp, 0)); | 4952 __ ld(a5, MemOperand(sp, 0)); |
| 4953 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); | 4953 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); |
| 4954 | 4954 |
| 4955 if (mode == DISABLE_ALLOCATION_SITES) { | 4955 if (mode == DISABLE_ALLOCATION_SITES) { |
| (...skipping 286 matching lines...) |
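The dispatch above leans on the ElementsKind numbering pinned down by the DCHECKs: packed kinds are even and each holey counterpart is the next odd value, so bit 0 alone both tests and sets holeyness. A hedged restatement of that encoding (local names, not V8's enum):

```cpp
// Packed kinds are even; 'kind | 1' is the matching holey kind.
enum SketchElementsKind {
  kFastSmiElements = 0,     kFastHoleySmiElements = 1,
  kFastElements = 2,        kFastHoleyElements = 3,
  kFastDoubleElements = 4,  kFastHoleyDoubleElements = 5,
};

inline bool IsHoley(SketchElementsKind kind) { return (kind & 1) != 0; }
inline SketchElementsKind ToHoley(SketchElementsKind kind) {
  return static_cast<SketchElementsKind>(kind | 1);
}
```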
| 5242 // Prepare arguments. | 5242 // Prepare arguments. |
| 5243 __ mov(scratch, sp); | 5243 __ mov(scratch, sp); |
| 5244 | 5244 |
| 5245 // Allocate the v8::Arguments structure in the arguments' space since | 5245 // Allocate the v8::Arguments structure in the arguments' space since |
| 5246 // it's not controlled by GC. | 5246 // it's not controlled by GC. |
| 5247 const int kApiStackSpace = 4; | 5247 const int kApiStackSpace = 4; |
| 5248 | 5248 |
| 5249 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5249 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 5250 __ EnterExitFrame(false, kApiStackSpace); | 5250 __ EnterExitFrame(false, kApiStackSpace); |
| 5251 | 5251 |
| 5252 ASSERT(!api_function_address.is(a0) && !scratch.is(a0)); | 5252 DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); |
| 5253 // a0 = FunctionCallbackInfo& | 5253 // a0 = FunctionCallbackInfo& |
| 5254 // The v8::Arguments structure is after the return address. | 5254 // The v8::Arguments structure is after the return address. |
| 5255 __ Daddu(a0, sp, Operand(1 * kPointerSize)); | 5255 __ Daddu(a0, sp, Operand(1 * kPointerSize)); |
| 5256 // FunctionCallbackInfo::implicit_args_ | 5256 // FunctionCallbackInfo::implicit_args_ |
| 5257 __ sd(scratch, MemOperand(a0, 0 * kPointerSize)); | 5257 __ sd(scratch, MemOperand(a0, 0 * kPointerSize)); |
| 5258 // FunctionCallbackInfo::values_ | 5258 // FunctionCallbackInfo::values_ |
| 5259 __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 5259 __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); |
| 5260 __ sd(at, MemOperand(a0, 1 * kPointerSize)); | 5260 __ sd(at, MemOperand(a0, 1 * kPointerSize)); |
| 5261 // FunctionCallbackInfo::length_ = argc | 5261 // FunctionCallbackInfo::length_ = argc |
| 5262 __ li(at, Operand(argc)); | 5262 __ li(at, Operand(argc)); |
| (...skipping 57 matching lines...) |
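The three stores above lay out a FunctionCallbackInfo for the API call: a pointer to the implicit-args block just pushed, a pointer to the last explicit argument, and the argument count. A rough picture of the record being filled in (field names assumed from the comments, not taken from V8's headers):

```cpp
#include <cstdint>

// Hypothetical mirror of the three words the stub writes at a0.
struct FunctionCallbackInfoSketch {
  void** implicit_args;  // the FCA::kArgsLength block below the arguments
  void** values;         // implicit_args + (kArgsLength - 1 + argc) slots up
  int64_t length;        // argc
};
```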
| 5320 MemOperand(fp, 6 * kPointerSize), | 5320 MemOperand(fp, 6 * kPointerSize), |
| 5321 NULL); | 5321 NULL); |
| 5322 } | 5322 } |
| 5323 | 5323 |
| 5324 | 5324 |
| 5325 #undef __ | 5325 #undef __ |
| 5326 | 5326 |
| 5327 } } // namespace v8::internal | 5327 } } // namespace v8::internal |
| 5328 | 5328 |
| 5329 #endif // V8_TARGET_ARCH_MIPS64 | 5329 #endif // V8_TARGET_ARCH_MIPS64 |