| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
| 8 | 8 |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 339 matching lines...) |
| 350 | 350 |
| 351 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | 351 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
| 352 // Update the static counter each time a new code stub is generated. | 352 // Update the static counter each time a new code stub is generated. |
| 353 isolate()->counters()->code_stubs()->Increment(); | 353 isolate()->counters()->code_stubs()->Increment(); |
| 354 | 354 |
| 355 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); | 355 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); |
| 356 int param_count = descriptor->GetEnvironmentParameterCount(); | 356 int param_count = descriptor->GetEnvironmentParameterCount(); |
| 357 { | 357 { |
| 358 // Call the runtime system in a fresh internal frame. | 358 // Call the runtime system in a fresh internal frame. |
| 359 FrameScope scope(masm, StackFrame::INTERNAL); | 359 FrameScope scope(masm, StackFrame::INTERNAL); |
| 360 ASSERT(param_count == 0 || | 360 DCHECK(param_count == 0 || |
| 361 a0.is(descriptor->GetEnvironmentParameterRegister( | 361 a0.is(descriptor->GetEnvironmentParameterRegister( |
| 362 param_count - 1))); | 362 param_count - 1))); |
| 363 // Push arguments, adjust sp. | 363 // Push arguments, adjust sp. |
| 364 __ Subu(sp, sp, Operand(param_count * kPointerSize)); | 364 __ Subu(sp, sp, Operand(param_count * kPointerSize)); |
| 365 for (int i = 0; i < param_count; ++i) { | 365 for (int i = 0; i < param_count; ++i) { |
| 366 // Store argument to stack. | 366 // Store argument to stack. |
| 367 __ sw(descriptor->GetEnvironmentParameterRegister(i), | 367 __ sw(descriptor->GetEnvironmentParameterRegister(i), |
| 368 MemOperand(sp, (param_count-1-i) * kPointerSize)); | 368 MemOperand(sp, (param_count-1-i) * kPointerSize)); |
| 369 } | 369 } |
| 370 ExternalReference miss = descriptor->miss_handler(); | 370 ExternalReference miss = descriptor->miss_handler(); |
| (...skipping 272 matching lines...) |
| 643 __ li(scratch_, Operand(non_smi_exponent)); | 643 __ li(scratch_, Operand(non_smi_exponent)); |
| 644 // Set the sign bit in scratch_ if the value was negative. | 644 // Set the sign bit in scratch_ if the value was negative. |
| 645 __ or_(scratch_, scratch_, sign_); | 645 __ or_(scratch_, scratch_, sign_); |
| 646 // Subtract from 0 if the value was negative. | 646 // Subtract from 0 if the value was negative. |
| 647 __ subu(at, zero_reg, the_int_); | 647 __ subu(at, zero_reg, the_int_); |
| 648 __ Movn(the_int_, at, sign_); | 648 __ Movn(the_int_, at, sign_); |
| 649 // We should be masking the implicit first digit of the mantissa away here, | 649 // We should be masking the implicit first digit of the mantissa away here, |
| 650 // but it just ends up combining harmlessly with the last digit of the | 650 // but it just ends up combining harmlessly with the last digit of the |
| 651 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 651 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
| 652 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | 652 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. |
| 653 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 653 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
| 654 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 654 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 655 __ srl(at, the_int_, shift_distance); | 655 __ srl(at, the_int_, shift_distance); |
| 656 __ or_(scratch_, scratch_, at); | 656 __ or_(scratch_, scratch_, at); |
| 657 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 657 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 658 HeapNumber::kExponentOffset)); | 658 HeapNumber::kExponentOffset)); |
| 659 __ sll(scratch_, the_int_, 32 - shift_distance); | 659 __ sll(scratch_, the_int_, 32 - shift_distance); |
| 660 __ Ret(USE_DELAY_SLOT); | 660 __ Ret(USE_DELAY_SLOT); |
| 661 __ sw(scratch_, FieldMemOperand(the_heap_number_, | 661 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 662 HeapNumber::kMantissaOffset)); | 662 HeapNumber::kMantissaOffset)); |
| 663 | 663 |
| (...skipping 40 matching lines...) |
| 704 // Comparing JS objects with <=, >= is complicated. | 704 // Comparing JS objects with <=, >= is complicated. |
| 705 if (cc != eq) { | 705 if (cc != eq) { |
| 706 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 706 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 707 // Normally here we fall through to return_equal, but undefined is | 707 // Normally here we fall through to return_equal, but undefined is |
| 708 // special: (undefined == undefined) == true, but | 708 // special: (undefined == undefined) == true, but |
| 709 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 709 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 710 if (cc == less_equal || cc == greater_equal) { | 710 if (cc == less_equal || cc == greater_equal) { |
| 711 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); | 711 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); |
| 712 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 712 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); |
| 713 __ Branch(&return_equal, ne, a0, Operand(t2)); | 713 __ Branch(&return_equal, ne, a0, Operand(t2)); |
| 714 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 714 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 715 __ Ret(USE_DELAY_SLOT); | 715 __ Ret(USE_DELAY_SLOT); |
| 716 if (cc == le) { | 716 if (cc == le) { |
| 717 // undefined <= undefined should fail. | 717 // undefined <= undefined should fail. |
| 718 __ li(v0, Operand(GREATER)); | 718 __ li(v0, Operand(GREATER)); |
| 719 } else { | 719 } else { |
| 720 // undefined >= undefined should fail. | 720 // undefined >= undefined should fail. |
| 721 __ li(v0, Operand(LESS)); | 721 __ li(v0, Operand(LESS)); |
| 722 } | 722 } |
| 723 } | 723 } |
| 724 } | 724 } |
| 725 } | 725 } |
| 726 | 726 |
| 727 __ bind(&return_equal); | 727 __ bind(&return_equal); |
| 728 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 728 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 729 __ Ret(USE_DELAY_SLOT); | 729 __ Ret(USE_DELAY_SLOT); |
| 730 if (cc == less) { | 730 if (cc == less) { |
| 731 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | 731 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
| 732 } else if (cc == greater) { | 732 } else if (cc == greater) { |
| 733 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | 733 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
| 734 } else { | 734 } else { |
| 735 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. | 735 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
| 736 } | 736 } |
| 737 | 737 |
| 738 // For less and greater we don't have to check for NaN since the result of | 738 // For less and greater we don't have to check for NaN since the result of |
| (...skipping 18 matching lines...) |
| 757 // Or with all low-bits of mantissa. | 757 // Or with all low-bits of mantissa. |
| 758 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 758 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 759 __ Or(v0, t3, Operand(t2)); | 759 __ Or(v0, t3, Operand(t2)); |
| 760 // For equal we already have the right value in v0: Return zero (equal) | 760 // For equal we already have the right value in v0: Return zero (equal) |
| 761 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 761 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 762 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 762 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
| 763 // value if it's a NaN. | 763 // value if it's a NaN. |
| 764 if (cc != eq) { | 764 if (cc != eq) { |
| 765 // All-zero means Infinity means equal. | 765 // All-zero means Infinity means equal. |
| 766 __ Ret(eq, v0, Operand(zero_reg)); | 766 __ Ret(eq, v0, Operand(zero_reg)); |
| 767 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 767 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 768 __ Ret(USE_DELAY_SLOT); | 768 __ Ret(USE_DELAY_SLOT); |
| 769 if (cc == le) { | 769 if (cc == le) { |
| 770 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. | 770 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. |
| 771 } else { | 771 } else { |
| 772 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. | 772 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. |
| 773 } | 773 } |
| 774 } | 774 } |
| 775 } | 775 } |
| 776 // No fall through here. | 776 // No fall through here. |
| 777 | 777 |
| 778 __ bind(&not_identical); | 778 __ bind(&not_identical); |
| 779 } | 779 } |
| 780 | 780 |
| 781 | 781 |
| 782 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 782 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 783 Register lhs, | 783 Register lhs, |
| 784 Register rhs, | 784 Register rhs, |
| 785 Label* both_loaded_as_doubles, | 785 Label* both_loaded_as_doubles, |
| 786 Label* slow, | 786 Label* slow, |
| 787 bool strict) { | 787 bool strict) { |
| 788 ASSERT((lhs.is(a0) && rhs.is(a1)) || | 788 DCHECK((lhs.is(a0) && rhs.is(a1)) || |
| 789 (lhs.is(a1) && rhs.is(a0))); | 789 (lhs.is(a1) && rhs.is(a0))); |
| 790 | 790 |
| 791 Label lhs_is_smi; | 791 Label lhs_is_smi; |
| 792 __ JumpIfSmi(lhs, &lhs_is_smi); | 792 __ JumpIfSmi(lhs, &lhs_is_smi); |
| 793 // Rhs is a Smi. | 793 // Rhs is a Smi. |
| 794 // Check whether the non-smi is a heap number. | 794 // Check whether the non-smi is a heap number. |
| 795 __ GetObjectType(lhs, t4, t4); | 795 __ GetObjectType(lhs, t4, t4); |
| 796 if (strict) { | 796 if (strict) { |
| 797 // If lhs was not a number and rhs was a Smi then strict equality cannot | 797 // If lhs was not a number and rhs was a Smi then strict equality cannot |
| 798 // succeed. Return non-equal (lhs is already not zero). | 798 // succeed. Return non-equal (lhs is already not zero). |
| (...skipping 97 matching lines...) |
| 896 __ jmp(both_loaded_as_doubles); | 896 __ jmp(both_loaded_as_doubles); |
| 897 } | 897 } |
| 898 | 898 |
| 899 | 899 |
| 900 // Fast negative check for internalized-to-internalized equality. | 900 // Fast negative check for internalized-to-internalized equality. |
| 901 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 901 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 902 Register lhs, | 902 Register lhs, |
| 903 Register rhs, | 903 Register rhs, |
| 904 Label* possible_strings, | 904 Label* possible_strings, |
| 905 Label* not_both_strings) { | 905 Label* not_both_strings) { |
| 906 ASSERT((lhs.is(a0) && rhs.is(a1)) || | 906 DCHECK((lhs.is(a0) && rhs.is(a1)) || |
| 907 (lhs.is(a1) && rhs.is(a0))); | 907 (lhs.is(a1) && rhs.is(a0))); |
| 908 | 908 |
| 909 // a2 is object type of rhs. | 909 // a2 is object type of rhs. |
| 910 Label object_test; | 910 Label object_test; |
| 911 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 911 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| 912 __ And(at, a2, Operand(kIsNotStringMask)); | 912 __ And(at, a2, Operand(kIsNotStringMask)); |
| 913 __ Branch(&object_test, ne, at, Operand(zero_reg)); | 913 __ Branch(&object_test, ne, at, Operand(zero_reg)); |
| 914 __ And(at, a2, Operand(kIsNotInternalizedMask)); | 914 __ And(at, a2, Operand(kIsNotInternalizedMask)); |
| 915 __ Branch(possible_strings, ne, at, Operand(zero_reg)); | 915 __ Branch(possible_strings, ne, at, Operand(zero_reg)); |
| 916 __ GetObjectType(rhs, a3, a3); | 916 __ GetObjectType(rhs, a3, a3); |
| (...skipping 70 matching lines...) |
| 987 // NOTICE! This code is only reached after a smi-fast-case check, so | 987 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 988 // it is certain that at least one operand isn't a smi. | 988 // it is certain that at least one operand isn't a smi. |
| 989 | 989 |
| 990 // Handle the case where the objects are identical. Either returns the answer | 990 // Handle the case where the objects are identical. Either returns the answer |
| 991 // or goes to slow. Only falls through if the objects were not identical. | 991 // or goes to slow. Only falls through if the objects were not identical. |
| 992 EmitIdenticalObjectComparison(masm, &slow, cc); | 992 EmitIdenticalObjectComparison(masm, &slow, cc); |
| 993 | 993 |
| 994 // If either is a Smi (we know that not both are), then they can only | 994 // If either is a Smi (we know that not both are), then they can only |
| 995 // be strictly equal if the other is a HeapNumber. | 995 // be strictly equal if the other is a HeapNumber. |
| 996 STATIC_ASSERT(kSmiTag == 0); | 996 STATIC_ASSERT(kSmiTag == 0); |
| 997 ASSERT_EQ(0, Smi::FromInt(0)); | 997 DCHECK_EQ(0, Smi::FromInt(0)); |
| 998 __ And(t2, lhs, Operand(rhs)); | 998 __ And(t2, lhs, Operand(rhs)); |
| 999 __ JumpIfNotSmi(t2, &not_smis, t0); | 999 __ JumpIfNotSmi(t2, &not_smis, t0); |
| 1000 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 1000 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| 1001 // 1) Return the answer. | 1001 // 1) Return the answer. |
| 1002 // 2) Go to slow. | 1002 // 2) Go to slow. |
| 1003 // 3) Fall through to both_loaded_as_doubles. | 1003 // 3) Fall through to both_loaded_as_doubles. |
| 1004 // 4) Jump to rhs_not_nan. | 1004 // 4) Jump to rhs_not_nan. |
| 1005 // In cases 3 and 4 we have found out we were dealing with a number-number | 1005 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1006 // comparison and the numbers have been loaded into f12 and f14 as doubles, | 1006 // comparison and the numbers have been loaded into f12 and f14 as doubles, |
| 1007 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. | 1007 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
| (...skipping 23 matching lines...) |
| 1031 // Check if EQUAL condition is satisfied. If true, move conditionally | 1031 // Check if EQUAL condition is satisfied. If true, move conditionally |
| 1032 // result to v0. | 1032 // result to v0. |
| 1033 __ c(EQ, D, f12, f14); | 1033 __ c(EQ, D, f12, f14); |
| 1034 __ Movt(v0, t2); | 1034 __ Movt(v0, t2); |
| 1035 | 1035 |
| 1036 __ Ret(); | 1036 __ Ret(); |
| 1037 | 1037 |
| 1038 __ bind(&nan); | 1038 __ bind(&nan); |
| 1039 // NaN comparisons always fail. | 1039 // NaN comparisons always fail. |
| 1040 // Load whatever we need in v0 to make the comparison fail. | 1040 // Load whatever we need in v0 to make the comparison fail. |
| 1041 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 1041 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 1042 __ Ret(USE_DELAY_SLOT); | 1042 __ Ret(USE_DELAY_SLOT); |
| 1043 if (cc == lt || cc == le) { | 1043 if (cc == lt || cc == le) { |
| 1044 __ li(v0, Operand(GREATER)); | 1044 __ li(v0, Operand(GREATER)); |
| 1045 } else { | 1045 } else { |
| 1046 __ li(v0, Operand(LESS)); | 1046 __ li(v0, Operand(LESS)); |
| 1047 } | 1047 } |
| 1048 | 1048 |
| 1049 | 1049 |
| 1050 __ bind(&not_smis); | 1050 __ bind(&not_smis); |
| 1051 // At this point we know we are dealing with two different objects, | 1051 // At this point we know we are dealing with two different objects, |
| (...skipping 61 matching lines...) |
| 1113 // Figure out which native to call and setup the arguments. | 1113 // Figure out which native to call and setup the arguments. |
| 1114 Builtins::JavaScript native; | 1114 Builtins::JavaScript native; |
| 1115 if (cc == eq) { | 1115 if (cc == eq) { |
| 1116 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 1116 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1117 } else { | 1117 } else { |
| 1118 native = Builtins::COMPARE; | 1118 native = Builtins::COMPARE; |
| 1119 int ncr; // NaN compare result. | 1119 int ncr; // NaN compare result. |
| 1120 if (cc == lt || cc == le) { | 1120 if (cc == lt || cc == le) { |
| 1121 ncr = GREATER; | 1121 ncr = GREATER; |
| 1122 } else { | 1122 } else { |
| 1123 ASSERT(cc == gt || cc == ge); // Remaining cases. | 1123 DCHECK(cc == gt || cc == ge); // Remaining cases. |
| 1124 ncr = LESS; | 1124 ncr = LESS; |
| 1125 } | 1125 } |
| 1126 __ li(a0, Operand(Smi::FromInt(ncr))); | 1126 __ li(a0, Operand(Smi::FromInt(ncr))); |
| 1127 __ push(a0); | 1127 __ push(a0); |
| 1128 } | 1128 } |
| 1129 | 1129 |
| 1130 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1130 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1131 // tagged as a small integer. | 1131 // tagged as a small integer. |
| 1132 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1132 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 1133 | 1133 |
| (...skipping 230 matching lines...) |
| 1364 __ bind(&call_runtime); | 1364 __ bind(&call_runtime); |
| 1365 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); | 1365 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); |
| 1366 | 1366 |
| 1367 // The stub is called from non-optimized code, which expects the result | 1367 // The stub is called from non-optimized code, which expects the result |
| 1368 // as heap number in exponent. | 1368 // as heap number in exponent. |
| 1369 __ bind(&done); | 1369 __ bind(&done); |
| 1370 __ AllocateHeapNumber( | 1370 __ AllocateHeapNumber( |
| 1371 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); | 1371 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); |
| 1372 __ sdc1(double_result, | 1372 __ sdc1(double_result, |
| 1373 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 1373 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 1374 ASSERT(heapnumber.is(v0)); | 1374 DCHECK(heapnumber.is(v0)); |
| 1375 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | 1375 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); |
| 1376 __ DropAndRet(2); | 1376 __ DropAndRet(2); |
| 1377 } else { | 1377 } else { |
| 1378 __ push(ra); | 1378 __ push(ra); |
| 1379 { | 1379 { |
| 1380 AllowExternalCallThatCantCauseGC scope(masm); | 1380 AllowExternalCallThatCantCauseGC scope(masm); |
| 1381 __ PrepareCallCFunction(0, 2, scratch); | 1381 __ PrepareCallCFunction(0, 2, scratch); |
| 1382 __ MovToFloatParameters(double_base, double_exponent); | 1382 __ MovToFloatParameters(double_base, double_exponent); |
| 1383 __ CallCFunction( | 1383 __ CallCFunction( |
| 1384 ExternalReference::power_double_double_function(isolate()), | 1384 ExternalReference::power_double_double_function(isolate()), |
| (...skipping 126 matching lines...) |
| 1511 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. | 1511 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. |
| 1512 // Stack space reservation moved to the branch delay slot below. | 1512 // Stack space reservation moved to the branch delay slot below. |
| 1513 // Stack is still aligned. | 1513 // Stack is still aligned. |
| 1514 | 1514 |
| 1515 // Call the C routine. | 1515 // Call the C routine. |
| 1516 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. | 1516 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. |
| 1517 masm->jalr(t9); | 1517 masm->jalr(t9); |
| 1518 // Set up sp in the delay slot. | 1518 // Set up sp in the delay slot. |
| 1519 masm->addiu(sp, sp, -kCArgsSlotsSize); | 1519 masm->addiu(sp, sp, -kCArgsSlotsSize); |
| 1520 // Make sure the stored 'ra' points to this position. | 1520 // Make sure the stored 'ra' points to this position. |
| 1521 ASSERT_EQ(kNumInstructionsToJump, | 1521 DCHECK_EQ(kNumInstructionsToJump, |
| 1522 masm->InstructionsGeneratedSince(&find_ra)); | 1522 masm->InstructionsGeneratedSince(&find_ra)); |
| 1523 } | 1523 } |
| 1524 | 1524 |
| 1525 | 1525 |
| 1526 // Runtime functions should not return 'the hole'. Allowing it to escape may | 1526 // Runtime functions should not return 'the hole'. Allowing it to escape may |
| 1527 // lead to crashes in the IC code later. | 1527 // lead to crashes in the IC code later. |
| 1528 if (FLAG_debug_code) { | 1528 if (FLAG_debug_code) { |
| 1529 Label okay; | 1529 Label okay; |
| 1530 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); | 1530 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); |
| 1531 __ Branch(&okay, ne, v0, Operand(t0)); | 1531 __ Branch(&okay, ne, v0, Operand(t0)); |
| (...skipping 230 matching lines...) |
| 1762 // Uses registers a0 to t0. | 1762 // Uses registers a0 to t0. |
| 1763 // Expected input (depending on whether args are in registers or on the stack): | 1763 // Expected input (depending on whether args are in registers or on the stack): |
| 1764 // * object: a0 or at sp + 1 * kPointerSize. | 1764 // * object: a0 or at sp + 1 * kPointerSize. |
| 1765 // * function: a1 or at sp. | 1765 // * function: a1 or at sp. |
| 1766 // | 1766 // |
| 1767 // An inlined call site may have been generated before calling this stub. | 1767 // An inlined call site may have been generated before calling this stub. |
| 1768 // In this case the offset to the inline site to patch is passed on the stack, | 1768 // In this case the offset to the inline site to patch is passed on the stack, |
| 1769 // in the safepoint slot for register t0. | 1769 // in the safepoint slot for register t0. |
| 1770 void InstanceofStub::Generate(MacroAssembler* masm) { | 1770 void InstanceofStub::Generate(MacroAssembler* masm) { |
| 1771 // Call site inlining and patching implies arguments in registers. | 1771 // Call site inlining and patching implies arguments in registers. |
| 1772 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 1772 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
| 1773 // ReturnTrueFalse is only implemented for inlined call sites. | 1773 // ReturnTrueFalse is only implemented for inlined call sites. |
| 1774 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); | 1774 DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
| 1775 | 1775 |
| 1776 // Fixed register usage throughout the stub: | 1776 // Fixed register usage throughout the stub: |
| 1777 const Register object = a0; // Object (lhs). | 1777 const Register object = a0; // Object (lhs). |
| 1778 Register map = a3; // Map of the object. | 1778 Register map = a3; // Map of the object. |
| 1779 const Register function = a1; // Function (rhs). | 1779 const Register function = a1; // Function (rhs). |
| 1780 const Register prototype = t0; // Prototype of the function. | 1780 const Register prototype = t0; // Prototype of the function. |
| 1781 const Register inline_site = t5; | 1781 const Register inline_site = t5; |
| 1782 const Register scratch = a2; | 1782 const Register scratch = a2; |
| 1783 | 1783 |
| 1784 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize; | 1784 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize; |
| (...skipping 29 matching lines...) |
| 1814 // Check that the function prototype is a JS object. | 1814 // Check that the function prototype is a JS object. |
| 1815 __ JumpIfSmi(prototype, &slow); | 1815 __ JumpIfSmi(prototype, &slow); |
| 1816 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 1816 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
| 1817 | 1817 |
| 1818 // Update the global instanceof or call site inlined cache with the current | 1818 // Update the global instanceof or call site inlined cache with the current |
| 1819 // map and function. The cached answer will be set when it is known below. | 1819 // map and function. The cached answer will be set when it is known below. |
| 1820 if (!HasCallSiteInlineCheck()) { | 1820 if (!HasCallSiteInlineCheck()) { |
| 1821 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1821 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 1822 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 1822 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 1823 } else { | 1823 } else { |
| 1824 ASSERT(HasArgsInRegisters()); | 1824 DCHECK(HasArgsInRegisters()); |
| 1825 // Patch the (relocated) inlined map check. | 1825 // Patch the (relocated) inlined map check. |
| 1826 | 1826 |
| 1827 // The offset was stored in t0 safepoint slot. | 1827 // The offset was stored in t0 safepoint slot. |
| 1828 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 1828 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). |
| 1829 __ LoadFromSafepointRegisterSlot(scratch, t0); | 1829 __ LoadFromSafepointRegisterSlot(scratch, t0); |
| 1830 __ Subu(inline_site, ra, scratch); | 1830 __ Subu(inline_site, ra, scratch); |
| 1831 // Get the map location in scratch and patch it. | 1831 // Get the map location in scratch and patch it. |
| 1832 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. | 1832 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. |
| 1833 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset)); | 1833 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset)); |
| 1834 } | 1834 } |
| 1835 | 1835 |
| 1836 // Register mapping: a3 is object map and t0 is function prototype. | 1836 // Register mapping: a3 is object map and t0 is function prototype. |
| 1837 // Get prototype of object into a2. | 1837 // Get prototype of object into a2. |
| 1838 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 1838 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 1839 | 1839 |
| 1840 // We don't need map any more. Use it as a scratch register. | 1840 // We don't need map any more. Use it as a scratch register. |
| 1841 Register scratch2 = map; | 1841 Register scratch2 = map; |
| 1842 map = no_reg; | 1842 map = no_reg; |
| 1843 | 1843 |
| 1844 // Loop through the prototype chain looking for the function prototype. | 1844 // Loop through the prototype chain looking for the function prototype. |
| 1845 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 1845 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
| 1846 __ bind(&loop); | 1846 __ bind(&loop); |
| 1847 __ Branch(&is_instance, eq, scratch, Operand(prototype)); | 1847 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
| 1848 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); | 1848 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); |
| 1849 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1849 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 1850 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 1850 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 1851 __ Branch(&loop); | 1851 __ Branch(&loop); |
| 1852 | 1852 |
| 1853 __ bind(&is_instance); | 1853 __ bind(&is_instance); |
| 1854 ASSERT(Smi::FromInt(0) == 0); | 1854 DCHECK(Smi::FromInt(0) == 0); |
| 1855 if (!HasCallSiteInlineCheck()) { | 1855 if (!HasCallSiteInlineCheck()) { |
| 1856 __ mov(v0, zero_reg); | 1856 __ mov(v0, zero_reg); |
| 1857 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1857 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 1858 } else { | 1858 } else { |
| 1859 // Patch the call site to return true. | 1859 // Patch the call site to return true. |
| 1860 __ LoadRoot(v0, Heap::kTrueValueRootIndex); | 1860 __ LoadRoot(v0, Heap::kTrueValueRootIndex); |
| 1861 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 1861 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 1862 // Get the boolean result location in scratch and patch it. | 1862 // Get the boolean result location in scratch and patch it. |
| 1863 __ PatchRelocatedValue(inline_site, scratch, v0); | 1863 __ PatchRelocatedValue(inline_site, scratch, v0); |
| 1864 | 1864 |
| 1865 if (!ReturnTrueFalseObject()) { | 1865 if (!ReturnTrueFalseObject()) { |
| 1866 ASSERT_EQ(Smi::FromInt(0), 0); | 1866 DCHECK_EQ(Smi::FromInt(0), 0); |
| 1867 __ mov(v0, zero_reg); | 1867 __ mov(v0, zero_reg); |
| 1868 } | 1868 } |
| 1869 } | 1869 } |
| 1870 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 1870 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 1871 | 1871 |
| 1872 __ bind(&is_not_instance); | 1872 __ bind(&is_not_instance); |
| 1873 if (!HasCallSiteInlineCheck()) { | 1873 if (!HasCallSiteInlineCheck()) { |
| 1874 __ li(v0, Operand(Smi::FromInt(1))); | 1874 __ li(v0, Operand(Smi::FromInt(1))); |
| 1875 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1875 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 1876 } else { | 1876 } else { |
| (...skipping 200 matching lines...) |
| 2077 __ bind(&skip_min); | 2077 __ bind(&skip_min); |
| 2078 | 2078 |
| 2079 __ bind(&try_allocate); | 2079 __ bind(&try_allocate); |
| 2080 | 2080 |
| 2081 // Compute the sizes of backing store, parameter map, and arguments object. | 2081 // Compute the sizes of backing store, parameter map, and arguments object. |
| 2082 // 1. Parameter map, has 2 extra words containing context and backing store. | 2082 // 1. Parameter map, has 2 extra words containing context and backing store. |
| 2083 const int kParameterMapHeaderSize = | 2083 const int kParameterMapHeaderSize = |
| 2084 FixedArray::kHeaderSize + 2 * kPointerSize; | 2084 FixedArray::kHeaderSize + 2 * kPointerSize; |
| 2085 // If there are no mapped parameters, we do not need the parameter_map. | 2085 // If there are no mapped parameters, we do not need the parameter_map. |
| 2086 Label param_map_size; | 2086 Label param_map_size; |
| 2087 ASSERT_EQ(0, Smi::FromInt(0)); | 2087 DCHECK_EQ(0, Smi::FromInt(0)); |
| 2088 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg)); | 2088 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg)); |
| 2089 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0. | 2089 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0. |
| 2090 __ sll(t5, a1, 1); | 2090 __ sll(t5, a1, 1); |
| 2091 __ addiu(t5, t5, kParameterMapHeaderSize); | 2091 __ addiu(t5, t5, kParameterMapHeaderSize); |
| 2092 __ bind(&param_map_size); | 2092 __ bind(&param_map_size); |
| 2093 | 2093 |
| 2094 // 2. Backing store. | 2094 // 2. Backing store. |
| 2095 __ sll(t6, a2, 1); | 2095 __ sll(t6, a2, 1); |
| 2096 __ Addu(t5, t5, Operand(t6)); | 2096 __ Addu(t5, t5, Operand(t6)); |
| 2097 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); | 2097 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); |
| (...skipping 700 matching lines...) |
| 2798 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 2798 static void GenerateRecordCallTarget(MacroAssembler* masm) { |
| 2799 // Cache the called function in a feedback vector slot. Cache states | 2799 // Cache the called function in a feedback vector slot. Cache states |
| 2800 // are uninitialized, monomorphic (indicated by a JSFunction), and | 2800 // are uninitialized, monomorphic (indicated by a JSFunction), and |
| 2801 // megamorphic. | 2801 // megamorphic. |
| 2802 // a0 : number of arguments to the construct function | 2802 // a0 : number of arguments to the construct function |
| 2803 // a1 : the function to call | 2803 // a1 : the function to call |
| 2804 // a2 : Feedback vector | 2804 // a2 : Feedback vector |
| 2805 // a3 : slot in feedback vector (Smi) | 2805 // a3 : slot in feedback vector (Smi) |
| 2806 Label initialize, done, miss, megamorphic, not_array_function; | 2806 Label initialize, done, miss, megamorphic, not_array_function; |
| 2807 | 2807 |
| 2808 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 2808 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), |
| 2809 masm->isolate()->heap()->megamorphic_symbol()); | 2809 masm->isolate()->heap()->megamorphic_symbol()); |
| 2810 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 2810 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), |
| 2811 masm->isolate()->heap()->uninitialized_symbol()); | 2811 masm->isolate()->heap()->uninitialized_symbol()); |
| 2812 | 2812 |
| 2813 // Load the cache state into t0. | 2813 // Load the cache state into t0. |
| 2814 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2814 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); |
| 2815 __ Addu(t0, a2, Operand(t0)); | 2815 __ Addu(t0, a2, Operand(t0)); |
| 2816 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2816 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); |
| 2817 | 2817 |
| 2818 // A monomorphic cache hit or an already megamorphic state: invoke the | 2818 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 2819 // function without changing the state. | 2819 // function without changing the state. |
| 2820 __ Branch(&done, eq, t0, Operand(a1)); | 2820 __ Branch(&done, eq, t0, Operand(a1)); |
| (...skipping 398 matching lines...) |
| 3219 } | 3219 } |
| 3220 | 3220 |
| 3221 | 3221 |
| 3222 // StringCharCodeAtGenerator. | 3222 // StringCharCodeAtGenerator. |
| 3223 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 3223 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 3224 Label flat_string; | 3224 Label flat_string; |
| 3225 Label ascii_string; | 3225 Label ascii_string; |
| 3226 Label got_char_code; | 3226 Label got_char_code; |
| 3227 Label sliced_string; | 3227 Label sliced_string; |
| 3228 | 3228 |
| 3229 ASSERT(!t0.is(index_)); | 3229 DCHECK(!t0.is(index_)); |
| 3230 ASSERT(!t0.is(result_)); | 3230 DCHECK(!t0.is(result_)); |
| 3231 ASSERT(!t0.is(object_)); | 3231 DCHECK(!t0.is(object_)); |
| 3232 | 3232 |
| 3233 // If the receiver is a smi trigger the non-string case. | 3233 // If the receiver is a smi trigger the non-string case. |
| 3234 __ JumpIfSmi(object_, receiver_not_string_); | 3234 __ JumpIfSmi(object_, receiver_not_string_); |
| 3235 | 3235 |
| 3236 // Fetch the instance type of the receiver into result register. | 3236 // Fetch the instance type of the receiver into result register. |
| 3237 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3237 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3238 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3238 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3239 // If the receiver is not a string trigger the non-string case. | 3239 // If the receiver is not a string trigger the non-string case. |
| 3240 __ And(t0, result_, Operand(kIsNotStringMask)); | 3240 __ And(t0, result_, Operand(kIsNotStringMask)); |
| 3241 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg)); | 3241 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg)); |
| (...skipping 32 matching lines...) |
| 3274 result_, | 3274 result_, |
| 3275 Heap::kHeapNumberMapRootIndex, | 3275 Heap::kHeapNumberMapRootIndex, |
| 3276 index_not_number_, | 3276 index_not_number_, |
| 3277 DONT_DO_SMI_CHECK); | 3277 DONT_DO_SMI_CHECK); |
| 3278 call_helper.BeforeCall(masm); | 3278 call_helper.BeforeCall(masm); |
| 3279 // Consumed by runtime conversion function: | 3279 // Consumed by runtime conversion function: |
| 3280 __ Push(object_, index_); | 3280 __ Push(object_, index_); |
| 3281 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 3281 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| 3282 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 3282 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| 3283 } else { | 3283 } else { |
| 3284 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 3284 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| 3285 // NumberToSmi discards numbers that are not exact integers. | 3285 // NumberToSmi discards numbers that are not exact integers. |
| 3286 __ CallRuntime(Runtime::kNumberToSmi, 1); | 3286 __ CallRuntime(Runtime::kNumberToSmi, 1); |
| 3287 } | 3287 } |
| 3288 | 3288 |
| 3289 // Save the conversion result before the pop instructions below | 3289 // Save the conversion result before the pop instructions below |
| 3290 // have a chance to overwrite it. | 3290 // have a chance to overwrite it. |
| 3291 | 3291 |
| 3292 __ Move(index_, v0); | 3292 __ Move(index_, v0); |
| 3293 __ pop(object_); | 3293 __ pop(object_); |
| 3294 // Reload the instance type. | 3294 // Reload the instance type. |
| (...skipping 22 matching lines...) |
| 3317 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 3317 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); |
| 3318 } | 3318 } |
| 3319 | 3319 |
| 3320 | 3320 |
| 3321 // ------------------------------------------------------------------------- | 3321 // ------------------------------------------------------------------------- |
| 3322 // StringCharFromCodeGenerator | 3322 // StringCharFromCodeGenerator |
| 3323 | 3323 |
| 3324 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 3324 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| 3325 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 3325 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| 3326 | 3326 |
| 3327 ASSERT(!t0.is(result_)); | 3327 DCHECK(!t0.is(result_)); |
| 3328 ASSERT(!t0.is(code_)); | 3328 DCHECK(!t0.is(code_)); |
| 3329 | 3329 |
| 3330 STATIC_ASSERT(kSmiTag == 0); | 3330 STATIC_ASSERT(kSmiTag == 0); |
| 3331 STATIC_ASSERT(kSmiShiftSize == 0); | 3331 STATIC_ASSERT(kSmiShiftSize == 0); |
| 3332 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 3332 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
| 3333 __ And(t0, | 3333 __ And(t0, |
| 3334 code_, | 3334 code_, |
| 3335 Operand(kSmiTagMask | | 3335 Operand(kSmiTagMask | |
| 3336 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 3336 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
| 3337 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); | 3337 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); |
| 3338 | 3338 |
| 3339 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 3339 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 3340 // At this point code register contains smi tagged ASCII char code. | 3340 // At this point code register contains smi tagged ASCII char code. |
| 3341 STATIC_ASSERT(kSmiTag == 0); | 3341 STATIC_ASSERT(kSmiTag == 0); |
| 3342 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); | 3342 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); |
| (...skipping 351 matching lines...) |
| 3694 Register scratch2, | 3694 Register scratch2, |
| 3695 Register scratch3) { | 3695 Register scratch3) { |
| 3696 Register length = scratch1; | 3696 Register length = scratch1; |
| 3697 | 3697 |
| 3698 // Compare lengths. | 3698 // Compare lengths. |
| 3699 Label strings_not_equal, check_zero_length; | 3699 Label strings_not_equal, check_zero_length; |
| 3700 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); | 3700 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); |
| 3701 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 3701 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 3702 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); | 3702 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); |
| 3703 __ bind(&strings_not_equal); | 3703 __ bind(&strings_not_equal); |
| 3704 ASSERT(is_int16(NOT_EQUAL)); | 3704 DCHECK(is_int16(NOT_EQUAL)); |
| 3705 __ Ret(USE_DELAY_SLOT); | 3705 __ Ret(USE_DELAY_SLOT); |
| 3706 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); | 3706 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
| 3707 | 3707 |
| 3708 // Check if the length is zero. | 3708 // Check if the length is zero. |
| 3709 Label compare_chars; | 3709 Label compare_chars; |
| 3710 __ bind(&check_zero_length); | 3710 __ bind(&check_zero_length); |
| 3711 STATIC_ASSERT(kSmiTag == 0); | 3711 STATIC_ASSERT(kSmiTag == 0); |
| 3712 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); | 3712 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); |
| 3713 ASSERT(is_int16(EQUAL)); | 3713 DCHECK(is_int16(EQUAL)); |
| 3714 __ Ret(USE_DELAY_SLOT); | 3714 __ Ret(USE_DELAY_SLOT); |
| 3715 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 3715 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 3716 | 3716 |
| 3717 // Compare characters. | 3717 // Compare characters. |
| 3718 __ bind(&compare_chars); | 3718 __ bind(&compare_chars); |
| 3719 | 3719 |
| 3720 GenerateAsciiCharsCompareLoop(masm, | 3720 GenerateAsciiCharsCompareLoop(masm, |
| 3721 left, right, length, scratch2, scratch3, v0, | 3721 left, right, length, scratch2, scratch3, v0, |
| 3722 &strings_not_equal); | 3722 &strings_not_equal); |
| 3723 | 3723 |
| (...skipping 22 matching lines...) |
| 3746 STATIC_ASSERT(kSmiTag == 0); | 3746 STATIC_ASSERT(kSmiTag == 0); |
| 3747 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); | 3747 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
| 3748 | 3748 |
| 3749 // Compare loop. | 3749 // Compare loop. |
| 3750 GenerateAsciiCharsCompareLoop(masm, | 3750 GenerateAsciiCharsCompareLoop(masm, |
| 3751 left, right, min_length, scratch2, scratch4, v0, | 3751 left, right, min_length, scratch2, scratch4, v0, |
| 3752 &result_not_equal); | 3752 &result_not_equal); |
| 3753 | 3753 |
| 3754 // Compare lengths - strings up to min-length are equal. | 3754 // Compare lengths - strings up to min-length are equal. |
| 3755 __ bind(&compare_lengths); | 3755 __ bind(&compare_lengths); |
| 3756 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 3756 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| 3757 // Use length_delta as result if it's zero. | 3757 // Use length_delta as result if it's zero. |
| 3758 __ mov(scratch2, length_delta); | 3758 __ mov(scratch2, length_delta); |
| 3759 __ mov(scratch4, zero_reg); | 3759 __ mov(scratch4, zero_reg); |
| 3760 __ mov(v0, zero_reg); | 3760 __ mov(v0, zero_reg); |
| 3761 | 3761 |
| 3762 __ bind(&result_not_equal); | 3762 __ bind(&result_not_equal); |
| 3763 // Conditionally update the result based either on length_delta or | 3763 // Conditionally update the result based either on length_delta or |
| 3764 // the last comparison performed in the loop above. | 3764 // the last comparison performed in the loop above. |
| 3765 Label ret; | 3765 Label ret; |
| 3766 __ Branch(&ret, eq, scratch2, Operand(scratch4)); | 3766 __ Branch(&ret, eq, scratch2, Operand(scratch4)); |
| (...skipping 95 matching lines...) |
| 3862 } | 3862 } |
| 3863 | 3863 |
| 3864 // Tail call into the stub that handles binary operations with allocation | 3864 // Tail call into the stub that handles binary operations with allocation |
| 3865 // sites. | 3865 // sites. |
| 3866 BinaryOpWithAllocationSiteStub stub(isolate(), state_); | 3866 BinaryOpWithAllocationSiteStub stub(isolate(), state_); |
| 3867 __ TailCallStub(&stub); | 3867 __ TailCallStub(&stub); |
| 3868 } | 3868 } |
| 3869 | 3869 |
| 3870 | 3870 |
| 3871 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 3871 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 3872 ASSERT(state_ == CompareIC::SMI); | 3872 DCHECK(state_ == CompareIC::SMI); |
| 3873 Label miss; | 3873 Label miss; |
| 3874 __ Or(a2, a1, a0); | 3874 __ Or(a2, a1, a0); |
| 3875 __ JumpIfNotSmi(a2, &miss); | 3875 __ JumpIfNotSmi(a2, &miss); |
| 3876 | 3876 |
| 3877 if (GetCondition() == eq) { | 3877 if (GetCondition() == eq) { |
| 3878 // For equality we do not care about the sign of the result. | 3878 // For equality we do not care about the sign of the result. |
| 3879 __ Ret(USE_DELAY_SLOT); | 3879 __ Ret(USE_DELAY_SLOT); |
| 3880 __ Subu(v0, a0, a1); | 3880 __ Subu(v0, a0, a1); |
| 3881 } else { | 3881 } else { |
| 3882 // Untag before subtracting to avoid handling overflow. | 3882 // Untag before subtracting to avoid handling overflow. |
| 3883 __ SmiUntag(a1); | 3883 __ SmiUntag(a1); |
| 3884 __ SmiUntag(a0); | 3884 __ SmiUntag(a0); |
| 3885 __ Ret(USE_DELAY_SLOT); | 3885 __ Ret(USE_DELAY_SLOT); |
| 3886 __ Subu(v0, a1, a0); | 3886 __ Subu(v0, a1, a0); |
| 3887 } | 3887 } |
| 3888 | 3888 |
| 3889 __ bind(&miss); | 3889 __ bind(&miss); |
| 3890 GenerateMiss(masm); | 3890 GenerateMiss(masm); |
| 3891 } | 3891 } |
| 3892 | 3892 |
| 3893 | 3893 |
| 3894 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 3894 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| 3895 ASSERT(state_ == CompareIC::NUMBER); | 3895 DCHECK(state_ == CompareIC::NUMBER); |
| 3896 | 3896 |
| 3897 Label generic_stub; | 3897 Label generic_stub; |
| 3898 Label unordered, maybe_undefined1, maybe_undefined2; | 3898 Label unordered, maybe_undefined1, maybe_undefined2; |
| 3899 Label miss; | 3899 Label miss; |
| 3900 | 3900 |
| 3901 if (left_ == CompareIC::SMI) { | 3901 if (left_ == CompareIC::SMI) { |
| 3902 __ JumpIfNotSmi(a1, &miss); | 3902 __ JumpIfNotSmi(a1, &miss); |
| 3903 } | 3903 } |
| 3904 if (right_ == CompareIC::SMI) { | 3904 if (right_ == CompareIC::SMI) { |
| 3905 __ JumpIfNotSmi(a0, &miss); | 3905 __ JumpIfNotSmi(a0, &miss); |
| (...skipping 32 matching lines...) |
| 3938 | 3938 |
| 3939 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 3939 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
| 3940 Label fpu_eq, fpu_lt; | 3940 Label fpu_eq, fpu_lt; |
| 3941 // Test if equal, and also handle the unordered/NaN case. | 3941 // Test if equal, and also handle the unordered/NaN case. |
| 3942 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); | 3942 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
| 3943 | 3943 |
| 3944 // Test if less (unordered case is already handled). | 3944 // Test if less (unordered case is already handled). |
| 3945 __ BranchF(&fpu_lt, NULL, lt, f0, f2); | 3945 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
| 3946 | 3946 |
| 3947 // Otherwise it's greater, so just fall thru, and return. | 3947 // Otherwise it's greater, so just fall thru, and return. |
| 3948 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); | 3948 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS)); |
| 3949 __ Ret(USE_DELAY_SLOT); | 3949 __ Ret(USE_DELAY_SLOT); |
| 3950 __ li(v0, Operand(GREATER)); | 3950 __ li(v0, Operand(GREATER)); |
| 3951 | 3951 |
| 3952 __ bind(&fpu_eq); | 3952 __ bind(&fpu_eq); |
| 3953 __ Ret(USE_DELAY_SLOT); | 3953 __ Ret(USE_DELAY_SLOT); |
| 3954 __ li(v0, Operand(EQUAL)); | 3954 __ li(v0, Operand(EQUAL)); |
| 3955 | 3955 |
| 3956 __ bind(&fpu_lt); | 3956 __ bind(&fpu_lt); |
| 3957 __ Ret(USE_DELAY_SLOT); | 3957 __ Ret(USE_DELAY_SLOT); |
| 3958 __ li(v0, Operand(LESS)); | 3958 __ li(v0, Operand(LESS)); |
| (...skipping 19 matching lines...) |
| 3978 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 3978 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 3979 __ Branch(&unordered, eq, a1, Operand(at)); | 3979 __ Branch(&unordered, eq, a1, Operand(at)); |
| 3980 } | 3980 } |
| 3981 | 3981 |
| 3982 __ bind(&miss); | 3982 __ bind(&miss); |
| 3983 GenerateMiss(masm); | 3983 GenerateMiss(masm); |
| 3984 } | 3984 } |
| 3985 | 3985 |
| 3986 | 3986 |
| 3987 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 3987 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
| 3988 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | 3988 DCHECK(state_ == CompareIC::INTERNALIZED_STRING); |
| 3989 Label miss; | 3989 Label miss; |
| 3990 | 3990 |
| 3991 // Registers containing left and right operands respectively. | 3991 // Registers containing left and right operands respectively. |
| 3992 Register left = a1; | 3992 Register left = a1; |
| 3993 Register right = a0; | 3993 Register right = a0; |
| 3994 Register tmp1 = a2; | 3994 Register tmp1 = a2; |
| 3995 Register tmp2 = a3; | 3995 Register tmp2 = a3; |
| 3996 | 3996 |
| 3997 // Check that both operands are heap objects. | 3997 // Check that both operands are heap objects. |
| 3998 __ JumpIfEitherSmi(left, right, &miss); | 3998 __ JumpIfEitherSmi(left, right, &miss); |
| 3999 | 3999 |
| 4000 // Check that both operands are internalized strings. | 4000 // Check that both operands are internalized strings. |
| 4001 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4001 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 4002 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4002 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 4003 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4003 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 4004 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4004 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 4005 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 4005 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| 4006 __ Or(tmp1, tmp1, Operand(tmp2)); | 4006 __ Or(tmp1, tmp1, Operand(tmp2)); |
| 4007 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 4007 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
| 4008 __ Branch(&miss, ne, at, Operand(zero_reg)); | 4008 __ Branch(&miss, ne, at, Operand(zero_reg)); |
| 4009 | 4009 |
| 4010 // Make sure a0 is non-zero. At this point input operands are | 4010 // Make sure a0 is non-zero. At this point input operands are |
| 4011 // guaranteed to be non-zero. | 4011 // guaranteed to be non-zero. |
| 4012 ASSERT(right.is(a0)); | 4012 DCHECK(right.is(a0)); |
| 4013 STATIC_ASSERT(EQUAL == 0); | 4013 STATIC_ASSERT(EQUAL == 0); |
| 4014 STATIC_ASSERT(kSmiTag == 0); | 4014 STATIC_ASSERT(kSmiTag == 0); |
| 4015 __ mov(v0, right); | 4015 __ mov(v0, right); |
| 4016 // Internalized strings are compared by identity. | 4016 // Internalized strings are compared by identity. |
| 4017 __ Ret(ne, left, Operand(right)); | 4017 __ Ret(ne, left, Operand(right)); |
| 4018 ASSERT(is_int16(EQUAL)); | 4018 DCHECK(is_int16(EQUAL)); |
| 4019 __ Ret(USE_DELAY_SLOT); | 4019 __ Ret(USE_DELAY_SLOT); |
| 4020 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 4020 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 4021 | 4021 |
| 4022 __ bind(&miss); | 4022 __ bind(&miss); |
| 4023 GenerateMiss(masm); | 4023 GenerateMiss(masm); |
| 4024 } | 4024 } |
| 4025 | 4025 |
| 4026 | 4026 |
| 4027 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 4027 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
| 4028 ASSERT(state_ == CompareIC::UNIQUE_NAME); | 4028 DCHECK(state_ == CompareIC::UNIQUE_NAME); |
| 4029 ASSERT(GetCondition() == eq); | 4029 DCHECK(GetCondition() == eq); |
| 4030 Label miss; | 4030 Label miss; |
| 4031 | 4031 |
| 4032 // Registers containing left and right operands respectively. | 4032 // Registers containing left and right operands respectively. |
| 4033 Register left = a1; | 4033 Register left = a1; |
| 4034 Register right = a0; | 4034 Register right = a0; |
| 4035 Register tmp1 = a2; | 4035 Register tmp1 = a2; |
| 4036 Register tmp2 = a3; | 4036 Register tmp2 = a3; |
| 4037 | 4037 |
| 4038 // Check that both operands are heap objects. | 4038 // Check that both operands are heap objects. |
| 4039 __ JumpIfEitherSmi(left, right, &miss); | 4039 __ JumpIfEitherSmi(left, right, &miss); |
| 4040 | 4040 |
| 4041 // Check that both operands are unique names. This leaves the instance | 4041 // Check that both operands are unique names. This leaves the instance |
| 4042 // types loaded in tmp1 and tmp2. | 4042 // types loaded in tmp1 and tmp2. |
| 4043 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4043 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 4044 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4044 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 4045 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4045 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 4046 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4046 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 4047 | 4047 |
| 4048 __ JumpIfNotUniqueName(tmp1, &miss); | 4048 __ JumpIfNotUniqueName(tmp1, &miss); |
| 4049 __ JumpIfNotUniqueName(tmp2, &miss); | 4049 __ JumpIfNotUniqueName(tmp2, &miss); |
| 4050 | 4050 |
| 4051 // Use a0 as result | 4051 // Use a0 as result |
| 4052 __ mov(v0, a0); | 4052 __ mov(v0, a0); |
| 4053 | 4053 |
| 4054 // Unique names are compared by identity. | 4054 // Unique names are compared by identity. |
| 4055 Label done; | 4055 Label done; |
| 4056 __ Branch(&done, ne, left, Operand(right)); | 4056 __ Branch(&done, ne, left, Operand(right)); |
| 4057 // Make sure a0 is non-zero. At this point input operands are | 4057 // Make sure a0 is non-zero. At this point input operands are |
| 4058 // guaranteed to be non-zero. | 4058 // guaranteed to be non-zero. |
| 4059 ASSERT(right.is(a0)); | 4059 DCHECK(right.is(a0)); |
| 4060 STATIC_ASSERT(EQUAL == 0); | 4060 STATIC_ASSERT(EQUAL == 0); |
| 4061 STATIC_ASSERT(kSmiTag == 0); | 4061 STATIC_ASSERT(kSmiTag == 0); |
| 4062 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 4062 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 4063 __ bind(&done); | 4063 __ bind(&done); |
| 4064 __ Ret(); | 4064 __ Ret(); |
| 4065 | 4065 |
| 4066 __ bind(&miss); | 4066 __ bind(&miss); |
| 4067 GenerateMiss(masm); | 4067 GenerateMiss(masm); |
| 4068 } | 4068 } |
| 4069 | 4069 |
| 4070 | 4070 |
| 4071 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 4071 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 4072 ASSERT(state_ == CompareIC::STRING); | 4072 DCHECK(state_ == CompareIC::STRING); |
| 4073 Label miss; | 4073 Label miss; |
| 4074 | 4074 |
| 4075 bool equality = Token::IsEqualityOp(op_); | 4075 bool equality = Token::IsEqualityOp(op_); |
| 4076 | 4076 |
| 4077 // Registers containing left and right operands respectively. | 4077 // Registers containing left and right operands respectively. |
| 4078 Register left = a1; | 4078 Register left = a1; |
| 4079 Register right = a0; | 4079 Register right = a0; |
| 4080 Register tmp1 = a2; | 4080 Register tmp1 = a2; |
| 4081 Register tmp2 = a3; | 4081 Register tmp2 = a3; |
| 4082 Register tmp3 = t0; | 4082 Register tmp3 = t0; |
| (...skipping 22 matching lines...) |
| 4105 __ Ret(USE_DELAY_SLOT); | 4105 __ Ret(USE_DELAY_SLOT); |
| 4106 __ mov(v0, zero_reg); // In the delay slot. | 4106 __ mov(v0, zero_reg); // In the delay slot. |
| 4107 __ bind(&left_ne_right); | 4107 __ bind(&left_ne_right); |
| 4108 | 4108 |
| 4109 // Handle not identical strings. | 4109 // Handle not identical strings. |
| 4110 | 4110 |
| 4111 // Check that both strings are internalized strings. If they are, we're done | 4111 // Check that both strings are internalized strings. If they are, we're done |
| 4112 // because we already know they are not identical. We know they are both | 4112 // because we already know they are not identical. We know they are both |
| 4113 // strings. | 4113 // strings. |
| 4114 if (equality) { | 4114 if (equality) { |
| 4115 ASSERT(GetCondition() == eq); | 4115 DCHECK(GetCondition() == eq); |
| 4116 STATIC_ASSERT(kInternalizedTag == 0); | 4116 STATIC_ASSERT(kInternalizedTag == 0); |
| 4117 __ Or(tmp3, tmp1, Operand(tmp2)); | 4117 __ Or(tmp3, tmp1, Operand(tmp2)); |
| 4118 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); | 4118 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); |
| 4119 Label is_symbol; | 4119 Label is_symbol; |
| 4120 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); | 4120 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); |
| 4121 // Make sure a0 is non-zero. At this point input operands are | 4121 // Make sure a0 is non-zero. At this point input operands are |
| 4122 // guaranteed to be non-zero. | 4122 // guaranteed to be non-zero. |
| 4123 ASSERT(right.is(a0)); | 4123 DCHECK(right.is(a0)); |
| 4124 __ Ret(USE_DELAY_SLOT); | 4124 __ Ret(USE_DELAY_SLOT); |
| 4125 __ mov(v0, a0); // In the delay slot. | 4125 __ mov(v0, a0); // In the delay slot. |
| 4126 __ bind(&is_symbol); | 4126 __ bind(&is_symbol); |
| 4127 } | 4127 } |
| 4128 | 4128 |
| 4129 // Check that both strings are sequential ASCII. | 4129 // Check that both strings are sequential ASCII. |
| 4130 Label runtime; | 4130 Label runtime; |
| 4131 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | 4131 __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 4132 tmp1, tmp2, tmp3, tmp4, &runtime); | 4132 tmp1, tmp2, tmp3, tmp4, &runtime); |
| 4133 | 4133 |
| (...skipping 14 matching lines...) |
| 4148 } else { | 4148 } else { |
| 4149 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 4149 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 4150 } | 4150 } |
| 4151 | 4151 |
| 4152 __ bind(&miss); | 4152 __ bind(&miss); |
| 4153 GenerateMiss(masm); | 4153 GenerateMiss(masm); |
| 4154 } | 4154 } |
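For equality, the string stub above uses the fact that two internalized strings are equal only if they are the same object, so once the identity check has failed it can answer "not equal" without comparing characters. The combined test works because kInternalizedTag is zero: OR-ing the two instance-type bytes and masking with kIsNotInternalizedMask yields zero only when both strings are internalized. A small model of that reasoning, with the mask value chosen purely for illustration:

    #include <cstdint>

    constexpr uint8_t kIsNotInternalizedMask = 0x40;  // illustrative bit, not the real constant

    // True when neither instance type has the not-internalized bit set.
    bool BothInternalized(uint8_t type1, uint8_t type2) {
      return ((type1 | type2) & kIsNotInternalizedMask) == 0;
    }

    // If both strings are internalized and the earlier pointer comparison
    // already failed, the strings are known to be different.
    bool KnownNotEqual(uint8_t type1, uint8_t type2, bool identical) {
      return !identical && BothInternalized(type1, type2);
    }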
| 4155 | 4155 |
| 4156 | 4156 |
| 4157 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 4157 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 4158 ASSERT(state_ == CompareIC::OBJECT); | 4158 DCHECK(state_ == CompareIC::OBJECT); |
| 4159 Label miss; | 4159 Label miss; |
| 4160 __ And(a2, a1, Operand(a0)); | 4160 __ And(a2, a1, Operand(a0)); |
| 4161 __ JumpIfSmi(a2, &miss); | 4161 __ JumpIfSmi(a2, &miss); |
| 4162 | 4162 |
| 4163 __ GetObjectType(a0, a2, a2); | 4163 __ GetObjectType(a0, a2, a2); |
| 4164 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4164 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 4165 __ GetObjectType(a1, a2, a2); | 4165 __ GetObjectType(a1, a2, a2); |
| 4166 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4166 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 4167 | 4167 |
| 4168 ASSERT(GetCondition() == eq); | 4168 DCHECK(GetCondition() == eq); |
| 4169 __ Ret(USE_DELAY_SLOT); | 4169 __ Ret(USE_DELAY_SLOT); |
| 4170 __ subu(v0, a0, a1); | 4170 __ subu(v0, a0, a1); |
| 4171 | 4171 |
| 4172 __ bind(&miss); | 4172 __ bind(&miss); |
| 4173 GenerateMiss(masm); | 4173 GenerateMiss(masm); |
| 4174 } | 4174 } |
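Returning a0 - a1 works here because the stub only answers strict equality on two JS objects: the difference is zero exactly when both registers hold the same object, and zero is also the Smi encoding of EQUAL. The same idea over a hypothetical tagged word type:

    #include <cstdint>

    using TaggedWord = uintptr_t;

    // Zero (the Smi for EQUAL) iff the two operands are the same object.
    TaggedWord CompareObjectIdentity(TaggedWord a0, TaggedWord a1) {
      return a0 - a1;
    }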
| 4175 | 4175 |
| 4176 | 4176 |
| 4177 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 4177 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
| 4178 Label miss; | 4178 Label miss; |
| (...skipping 68 matching lines...) |
| 4247 } | 4247 } |
| 4248 | 4248 |
| 4249 | 4249 |
| 4250 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 4250 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
| 4251 Label* miss, | 4251 Label* miss, |
| 4252 Label* done, | 4252 Label* done, |
| 4253 Register receiver, | 4253 Register receiver, |
| 4254 Register properties, | 4254 Register properties, |
| 4255 Handle<Name> name, | 4255 Handle<Name> name, |
| 4256 Register scratch0) { | 4256 Register scratch0) { |
| 4257 ASSERT(name->IsUniqueName()); | 4257 DCHECK(name->IsUniqueName()); |
| 4258 // If names of slots in range from 1 to kProbes - 1 for the hash value are | 4258 // If names of slots in range from 1 to kProbes - 1 for the hash value are |
| 4259 // not equal to the name and kProbes-th slot is not used (its name is the | 4259 // not equal to the name and kProbes-th slot is not used (its name is the |
| 4260 // undefined value), it guarantees the hash table doesn't contain the | 4260 // undefined value), it guarantees the hash table doesn't contain the |
| 4261 // property. It's true even if some slots represent deleted properties | 4261 // property. It's true even if some slots represent deleted properties |
| 4262 // (their names are the hole value). | 4262 // (their names are the hole value). |
| 4263 for (int i = 0; i < kInlinedProbes; i++) { | 4263 for (int i = 0; i < kInlinedProbes; i++) { |
| 4264 // scratch0 points to properties hash. | 4264 // scratch0 points to properties hash. |
| 4265 // Compute the masked index: (hash + i + i * i) & mask. | 4265 // Compute the masked index: (hash + i + i * i) & mask. |
| 4266 Register index = scratch0; | 4266 Register index = scratch0; |
| 4267 // Capacity is smi 2^n. | 4267 // Capacity is smi 2^n. |
| 4268 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); | 4268 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); |
| 4269 __ Subu(index, index, Operand(1)); | 4269 __ Subu(index, index, Operand(1)); |
| 4270 __ And(index, index, Operand( | 4270 __ And(index, index, Operand( |
| 4271 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); | 4271 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); |
| 4272 | 4272 |
| 4273 // Scale the index by multiplying by the entry size. | 4273 // Scale the index by multiplying by the entry size. |
| 4274 ASSERT(NameDictionary::kEntrySize == 3); | 4274 DCHECK(NameDictionary::kEntrySize == 3); |
| 4275 __ sll(at, index, 1); | 4275 __ sll(at, index, 1); |
| 4276 __ Addu(index, index, at); | 4276 __ Addu(index, index, at); |
| 4277 | 4277 |
| 4278 Register entity_name = scratch0; | 4278 Register entity_name = scratch0; |
| 4279 // Having undefined at this place means the name is not contained. | 4279 // Having undefined at this place means the name is not contained. |
| 4280 ASSERT_EQ(kSmiTagSize, 1); | 4280 DCHECK_EQ(kSmiTagSize, 1); |
| 4281 Register tmp = properties; | 4281 Register tmp = properties; |
| 4282 __ sll(scratch0, index, 1); | 4282 __ sll(scratch0, index, 1); |
| 4283 __ Addu(tmp, properties, scratch0); | 4283 __ Addu(tmp, properties, scratch0); |
| 4284 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 4284 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 4285 | 4285 |
| 4286 ASSERT(!tmp.is(entity_name)); | 4286 DCHECK(!tmp.is(entity_name)); |
| 4287 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 4287 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
| 4288 __ Branch(done, eq, entity_name, Operand(tmp)); | 4288 __ Branch(done, eq, entity_name, Operand(tmp)); |
| 4289 | 4289 |
| 4290 // Load the hole ready for use below: | 4290 // Load the hole ready for use below: |
| 4291 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 4291 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
| 4292 | 4292 |
| 4293 // Stop if found the property. | 4293 // Stop if found the property. |
| 4294 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); | 4294 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); |
| 4295 | 4295 |
| 4296 Label good; | 4296 Label good; |
| (...skipping 32 matching lines...) |
| 4329 // |done| label if a property with the given name is found. Jump to | 4329 // |done| label if a property with the given name is found. Jump to |
| 4330 // the |miss| label otherwise. | 4330 // the |miss| label otherwise. |
| 4331 // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 4331 // If lookup was successful |scratch2| will be equal to elements + 4 * index. |
| 4332 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 4332 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
| 4333 Label* miss, | 4333 Label* miss, |
| 4334 Label* done, | 4334 Label* done, |
| 4335 Register elements, | 4335 Register elements, |
| 4336 Register name, | 4336 Register name, |
| 4337 Register scratch1, | 4337 Register scratch1, |
| 4338 Register scratch2) { | 4338 Register scratch2) { |
| 4339 ASSERT(!elements.is(scratch1)); | 4339 DCHECK(!elements.is(scratch1)); |
| 4340 ASSERT(!elements.is(scratch2)); | 4340 DCHECK(!elements.is(scratch2)); |
| 4341 ASSERT(!name.is(scratch1)); | 4341 DCHECK(!name.is(scratch1)); |
| 4342 ASSERT(!name.is(scratch2)); | 4342 DCHECK(!name.is(scratch2)); |
| 4343 | 4343 |
| 4344 __ AssertName(name); | 4344 __ AssertName(name); |
| 4345 | 4345 |
| 4346 // Compute the capacity mask. | 4346 // Compute the capacity mask. |
| 4347 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 4347 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| 4348 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int | 4348 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int |
| 4349 __ Subu(scratch1, scratch1, Operand(1)); | 4349 __ Subu(scratch1, scratch1, Operand(1)); |
| 4350 | 4350 |
| 4351 // Generate an unrolled loop that performs a few probes before | 4351 // Generate an unrolled loop that performs a few probes before |
| 4352 // giving up. Measurements done on Gmail indicate that 2 probes | 4352 // giving up. Measurements done on Gmail indicate that 2 probes |
| 4353 // cover ~93% of loads from dictionaries. | 4353 // cover ~93% of loads from dictionaries. |
| 4354 for (int i = 0; i < kInlinedProbes; i++) { | 4354 for (int i = 0; i < kInlinedProbes; i++) { |
| 4355 // Compute the masked index: (hash + i + i * i) & mask. | 4355 // Compute the masked index: (hash + i + i * i) & mask. |
| 4356 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 4356 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 4357 if (i > 0) { | 4357 if (i > 0) { |
| 4358 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4358 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| 4359 // the hash in a separate instruction. The value hash + i + i * i is right | 4359 // the hash in a separate instruction. The value hash + i + i * i is right |
| 4360 // shifted in the following and instruction. | 4360 // shifted in the following and instruction. |
| 4361 ASSERT(NameDictionary::GetProbeOffset(i) < | 4361 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4362 1 << (32 - Name::kHashFieldOffset)); | 4362 1 << (32 - Name::kHashFieldOffset)); |
| 4363 __ Addu(scratch2, scratch2, Operand( | 4363 __ Addu(scratch2, scratch2, Operand( |
| 4364 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4364 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 4365 } | 4365 } |
| 4366 __ srl(scratch2, scratch2, Name::kHashShift); | 4366 __ srl(scratch2, scratch2, Name::kHashShift); |
| 4367 __ And(scratch2, scratch1, scratch2); | 4367 __ And(scratch2, scratch1, scratch2); |
| 4368 | 4368 |
| 4369 // Scale the index by multiplying by the element size. | 4369 // Scale the index by multiplying by the element size. |
| 4370 ASSERT(NameDictionary::kEntrySize == 3); | 4370 DCHECK(NameDictionary::kEntrySize == 3); |
| 4371 // scratch2 = scratch2 * 3. | 4371 // scratch2 = scratch2 * 3. |
| 4372 | 4372 |
| 4373 __ sll(at, scratch2, 1); | 4373 __ sll(at, scratch2, 1); |
| 4374 __ Addu(scratch2, scratch2, at); | 4374 __ Addu(scratch2, scratch2, at); |
| 4375 | 4375 |
| 4376 // Check if the key is identical to the name. | 4376 // Check if the key is identical to the name. |
| 4377 __ sll(at, scratch2, 2); | 4377 __ sll(at, scratch2, 2); |
| 4378 __ Addu(scratch2, elements, at); | 4378 __ Addu(scratch2, elements, at); |
| 4379 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); | 4379 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); |
| 4380 __ Branch(done, eq, name, Operand(at)); | 4380 __ Branch(done, eq, name, Operand(at)); |
| 4381 } | 4381 } |
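The unrolled loop above is quadratic probing: probe i inspects slot (hash + i + i*i) & mask, where mask is capacity - 1 for a power-of-two capacity, and the slot index is turned into a byte offset by multiplying by the entry size (3 words per entry, computed as index + (index << 1)) and then by the word size (a further << 2 on this 32-bit target). A plain C++ sketch of the same address arithmetic, taking the entry size and probe offsets from the comments and checks in the diff:

    #include <cstddef>
    #include <cstdint>

    constexpr int kEntrySize = 3;    // words per dictionary entry (see DCHECK above)
    constexpr int kPointerSize = 4;  // 32-bit MIPS word

    // Probe offset i + i*i used by the i-th probe of quadratic probing.
    constexpr uint32_t ProbeOffset(int i) { return static_cast<uint32_t>(i + i * i); }

    // Byte offset, relative to the start of the elements area, of the key
    // slot examined by probe 'i' for a name with the given hash.
    size_t ProbeKeyOffset(uint32_t hash, uint32_t mask, int i) {
      uint32_t index = (hash + ProbeOffset(i)) & mask;    // masked slot index
      uint32_t scaled = index + (index << 1);             // index * kEntrySize (== 3)
      return static_cast<size_t>(scaled) * kPointerSize;  // words -> bytes
    }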
| 4382 | 4382 |
| 4383 const int spill_mask = | 4383 const int spill_mask = |
| 4384 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | | 4384 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | |
| 4385 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & | 4385 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & |
| 4386 ~(scratch1.bit() | scratch2.bit()); | 4386 ~(scratch1.bit() | scratch2.bit()); |
| 4387 | 4387 |
| 4388 __ MultiPush(spill_mask); | 4388 __ MultiPush(spill_mask); |
| 4389 if (name.is(a0)) { | 4389 if (name.is(a0)) { |
| 4390 ASSERT(!elements.is(a1)); | 4390 DCHECK(!elements.is(a1)); |
| 4391 __ Move(a1, name); | 4391 __ Move(a1, name); |
| 4392 __ Move(a0, elements); | 4392 __ Move(a0, elements); |
| 4393 } else { | 4393 } else { |
| 4394 __ Move(a0, elements); | 4394 __ Move(a0, elements); |
| 4395 __ Move(a1, name); | 4395 __ Move(a1, name); |
| 4396 } | 4396 } |
| 4397 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 4397 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); |
| 4398 __ CallStub(&stub); | 4398 __ CallStub(&stub); |
| 4399 __ mov(scratch2, a2); | 4399 __ mov(scratch2, a2); |
| 4400 __ mov(at, v0); | 4400 __ mov(at, v0); |
| (...skipping 35 matching lines...) |
| 4436 | 4436 |
| 4437 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 4437 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 4438 | 4438 |
| 4439 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 4439 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 4440 // Compute the masked index: (hash + i + i * i) & mask. | 4440 // Compute the masked index: (hash + i + i * i) & mask. |
| 4441 // Capacity is smi 2^n. | 4441 // Capacity is smi 2^n. |
| 4442 if (i > 0) { | 4442 if (i > 0) { |
| 4443 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4443 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| 4444 // the hash in a separate instruction. The value hash + i + i * i is right | 4444 // the hash in a separate instruction. The value hash + i + i * i is right |
| 4445 // shifted in the following and instruction. | 4445 // shifted in the following and instruction. |
| 4446 ASSERT(NameDictionary::GetProbeOffset(i) < | 4446 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4447 1 << (32 - Name::kHashFieldOffset)); | 4447 1 << (32 - Name::kHashFieldOffset)); |
| 4448 __ Addu(index, hash, Operand( | 4448 __ Addu(index, hash, Operand( |
| 4449 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4449 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 4450 } else { | 4450 } else { |
| 4451 __ mov(index, hash); | 4451 __ mov(index, hash); |
| 4452 } | 4452 } |
| 4453 __ srl(index, index, Name::kHashShift); | 4453 __ srl(index, index, Name::kHashShift); |
| 4454 __ And(index, mask, index); | 4454 __ And(index, mask, index); |
| 4455 | 4455 |
| 4456 // Scale the index by multiplying by the entry size. | 4456 // Scale the index by multiplying by the entry size. |
| 4457 ASSERT(NameDictionary::kEntrySize == 3); | 4457 DCHECK(NameDictionary::kEntrySize == 3); |
| 4458 // index *= 3. | 4458 // index *= 3. |
| 4459 __ mov(at, index); | 4459 __ mov(at, index); |
| 4460 __ sll(index, index, 1); | 4460 __ sll(index, index, 1); |
| 4461 __ Addu(index, index, at); | 4461 __ Addu(index, index, at); |
| 4462 | 4462 |
| 4463 | 4463 |
| 4464 ASSERT_EQ(kSmiTagSize, 1); | 4464 DCHECK_EQ(kSmiTagSize, 1); |
| 4465 __ sll(index, index, 2); | 4465 __ sll(index, index, 2); |
| 4466 __ Addu(index, index, dictionary); | 4466 __ Addu(index, index, dictionary); |
| 4467 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4467 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 4468 | 4468 |
| 4469 // Having undefined at this place means the name is not contained. | 4469 // Having undefined at this place means the name is not contained. |
| 4470 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); | 4470 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
| 4471 | 4471 |
| 4472 // Stop if found the property. | 4472 // Stop if found the property. |
| 4473 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); | 4473 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
| 4474 | 4474 |
| (...skipping 116 matching lines...) |
| 4591 __ Ret(); | 4591 __ Ret(); |
| 4592 } | 4592 } |
| 4593 | 4593 |
| 4594 | 4594 |
| 4595 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 4595 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { |
| 4596 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 4596 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
| 4597 int argument_count = 3; | 4597 int argument_count = 3; |
| 4598 __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 4598 __ PrepareCallCFunction(argument_count, regs_.scratch0()); |
| 4599 Register address = | 4599 Register address = |
| 4600 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 4600 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
| 4601 ASSERT(!address.is(regs_.object())); | 4601 DCHECK(!address.is(regs_.object())); |
| 4602 ASSERT(!address.is(a0)); | 4602 DCHECK(!address.is(a0)); |
| 4603 __ Move(address, regs_.address()); | 4603 __ Move(address, regs_.address()); |
| 4604 __ Move(a0, regs_.object()); | 4604 __ Move(a0, regs_.object()); |
| 4605 __ Move(a1, address); | 4605 __ Move(a1, address); |
| 4606 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); | 4606 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); |
| 4607 | 4607 |
| 4608 AllowExternalCallThatCantCauseGC scope(masm); | 4608 AllowExternalCallThatCantCauseGC scope(masm); |
| 4609 __ CallCFunction( | 4609 __ CallCFunction( |
| 4610 ExternalReference::incremental_marking_record_write_function(isolate()), | 4610 ExternalReference::incremental_marking_record_write_function(isolate()), |
| 4611 argument_count); | 4611 argument_count); |
| 4612 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | 4612 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); |
| (...skipping 200 matching lines...) |
| 4813 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); | 4813 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); |
| 4814 | 4814 |
| 4815 // The caller's return address is above the saved temporaries. | 4815 // The caller's return address is above the saved temporaries. |
| 4816 // Grab that for the second argument to the hook. | 4816 // Grab that for the second argument to the hook. |
| 4817 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); | 4817 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); |
| 4818 | 4818 |
| 4819 // Align the stack if necessary. | 4819 // Align the stack if necessary. |
| 4820 int frame_alignment = masm->ActivationFrameAlignment(); | 4820 int frame_alignment = masm->ActivationFrameAlignment(); |
| 4821 if (frame_alignment > kPointerSize) { | 4821 if (frame_alignment > kPointerSize) { |
| 4822 __ mov(s5, sp); | 4822 __ mov(s5, sp); |
| 4823 ASSERT(IsPowerOf2(frame_alignment)); | 4823 DCHECK(IsPowerOf2(frame_alignment)); |
| 4824 __ And(sp, sp, Operand(-frame_alignment)); | 4824 __ And(sp, sp, Operand(-frame_alignment)); |
| 4825 } | 4825 } |
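The And(sp, sp, Operand(-frame_alignment)) above rounds the stack pointer down to a multiple of the alignment, which is only valid because the alignment is a power of two (hence the IsPowerOf2 check); the original sp is preserved in s5 so it can be restored afterwards. The same masking trick on an ordinary integer:

    #include <cassert>
    #include <cstdint>

    // Round 'value' down to a multiple of 'alignment', a power of two.
    // In two's complement, -alignment is a mask with the low bits cleared.
    uint32_t AlignDown(uint32_t value, uint32_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      return value & ~(alignment - 1);  // equivalent to value & (0u - alignment)
    }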
| 4826 __ Subu(sp, sp, kCArgsSlotsSize); | 4826 __ Subu(sp, sp, kCArgsSlotsSize); |
| 4827 #if defined(V8_HOST_ARCH_MIPS) | 4827 #if defined(V8_HOST_ARCH_MIPS) |
| 4828 int32_t entry_hook = | 4828 int32_t entry_hook = |
| 4829 reinterpret_cast<int32_t>(isolate()->function_entry_hook()); | 4829 reinterpret_cast<int32_t>(isolate()->function_entry_hook()); |
| 4830 __ li(t9, Operand(entry_hook)); | 4830 __ li(t9, Operand(entry_hook)); |
| 4831 #else | 4831 #else |
| 4832 // Under the simulator we need to indirect the entry hook through a | 4832 // Under the simulator we need to indirect the entry hook through a |
| 4833 // trampoline function at a known address. | 4833 // trampoline function at a known address. |
| (...skipping 46 matching lines...) |
| 4880 | 4880 |
| 4881 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 4881 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, |
| 4882 AllocationSiteOverrideMode mode) { | 4882 AllocationSiteOverrideMode mode) { |
| 4883 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 4883 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) |
| 4884 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES) | 4884 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES) |
| 4885 // a0 - number of arguments | 4885 // a0 - number of arguments |
| 4886 // a1 - constructor? | 4886 // a1 - constructor? |
| 4887 // sp[0] - last argument | 4887 // sp[0] - last argument |
| 4888 Label normal_sequence; | 4888 Label normal_sequence; |
| 4889 if (mode == DONT_OVERRIDE) { | 4889 if (mode == DONT_OVERRIDE) { |
| 4890 ASSERT(FAST_SMI_ELEMENTS == 0); | 4890 DCHECK(FAST_SMI_ELEMENTS == 0); |
| 4891 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 4891 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 4892 ASSERT(FAST_ELEMENTS == 2); | 4892 DCHECK(FAST_ELEMENTS == 2); |
| 4893 ASSERT(FAST_HOLEY_ELEMENTS == 3); | 4893 DCHECK(FAST_HOLEY_ELEMENTS == 3); |
| 4894 ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 4894 DCHECK(FAST_DOUBLE_ELEMENTS == 4); |
| 4895 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 4895 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
| 4896 | 4896 |
| 4897 // is the low bit set? If so, we are holey and that is good. | 4897 // is the low bit set? If so, we are holey and that is good. |
| 4898 __ And(at, a3, Operand(1)); | 4898 __ And(at, a3, Operand(1)); |
| 4899 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); | 4899 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); |
| 4900 } | 4900 } |
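The run of DCHECKs pins down the numbering of the fast ElementsKind values that the stub relies on; with that numbering every "holey" kind is odd, so testing the low bit of the kind is enough to decide whether the requested kind already tolerates holes. A sketch under exactly that assumed numbering:

    // Assumed numbering, as asserted in the stub above.
    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4,
      FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    // Holey kinds are exactly the odd values, so the low bit decides.
    inline bool IsHoleyKind(ElementsKind kind) {
      return (static_cast<int>(kind) & 1) != 0;
    }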
| 4901 | 4901 |
| 4902 // look at the first argument | 4902 // look at the first argument |
| 4903 __ lw(t1, MemOperand(sp, 0)); | 4903 __ lw(t1, MemOperand(sp, 0)); |
| 4904 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg)); | 4904 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg)); |
| 4905 | 4905 |
| (...skipping 287 matching lines...) |
| 5193 // Prepare arguments. | 5193 // Prepare arguments. |
| 5194 __ mov(scratch, sp); | 5194 __ mov(scratch, sp); |
| 5195 | 5195 |
| 5196 // Allocate the v8::Arguments structure in the arguments' space since | 5196 // Allocate the v8::Arguments structure in the arguments' space since |
| 5197 // it's not controlled by GC. | 5197 // it's not controlled by GC. |
| 5198 const int kApiStackSpace = 4; | 5198 const int kApiStackSpace = 4; |
| 5199 | 5199 |
| 5200 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5200 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 5201 __ EnterExitFrame(false, kApiStackSpace); | 5201 __ EnterExitFrame(false, kApiStackSpace); |
| 5202 | 5202 |
| 5203 ASSERT(!api_function_address.is(a0) && !scratch.is(a0)); | 5203 DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); |
| 5204 // a0 = FunctionCallbackInfo& | 5204 // a0 = FunctionCallbackInfo& |
| 5205 // Arguments is after the return address. | 5205 // Arguments is after the return address. |
| 5206 __ Addu(a0, sp, Operand(1 * kPointerSize)); | 5206 __ Addu(a0, sp, Operand(1 * kPointerSize)); |
| 5207 // FunctionCallbackInfo::implicit_args_ | 5207 // FunctionCallbackInfo::implicit_args_ |
| 5208 __ sw(scratch, MemOperand(a0, 0 * kPointerSize)); | 5208 __ sw(scratch, MemOperand(a0, 0 * kPointerSize)); |
| 5209 // FunctionCallbackInfo::values_ | 5209 // FunctionCallbackInfo::values_ |
| 5210 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 5210 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); |
| 5211 __ sw(at, MemOperand(a0, 1 * kPointerSize)); | 5211 __ sw(at, MemOperand(a0, 1 * kPointerSize)); |
| 5212 // FunctionCallbackInfo::length_ = argc | 5212 // FunctionCallbackInfo::length_ = argc |
| 5213 __ li(at, Operand(argc)); | 5213 __ li(at, Operand(argc)); |
| (...skipping 57 matching lines...) |
| 5271 MemOperand(fp, 6 * kPointerSize), | 5271 MemOperand(fp, 6 * kPointerSize), |
| 5272 NULL); | 5272 NULL); |
| 5273 } | 5273 } |
| 5274 | 5274 |
| 5275 | 5275 |
| 5276 #undef __ | 5276 #undef __ |
| 5277 | 5277 |
| 5278 } } // namespace v8::internal | 5278 } } // namespace v8::internal |
| 5279 | 5279 |
| 5280 #endif // V8_TARGET_ARCH_MIPS | 5280 #endif // V8_TARGET_ARCH_MIPS |