OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
8 | 8 |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 359 matching lines...) |
370 | 370 |
371 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | 371 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
372 // Update the static counter each time a new code stub is generated. | 372 // Update the static counter each time a new code stub is generated. |
373 isolate()->counters()->code_stubs()->Increment(); | 373 isolate()->counters()->code_stubs()->Increment(); |
374 | 374 |
375 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); | 375 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); |
376 int param_count = descriptor->GetEnvironmentParameterCount(); | 376 int param_count = descriptor->GetEnvironmentParameterCount(); |
377 { | 377 { |
378 // Call the runtime system in a fresh internal frame. | 378 // Call the runtime system in a fresh internal frame. |
379 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 379 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); |
380 ASSERT(param_count == 0 || | 380 DCHECK(param_count == 0 || |
381 r0.is(descriptor->GetEnvironmentParameterRegister( | 381 r0.is(descriptor->GetEnvironmentParameterRegister( |
382 param_count - 1))); | 382 param_count - 1))); |
383 // Push arguments. | 383 // Push arguments. |
384 for (int i = 0; i < param_count; ++i) { | 384 for (int i = 0; i < param_count; ++i) { |
385 __ push(descriptor->GetEnvironmentParameterRegister(i)); | 385 __ push(descriptor->GetEnvironmentParameterRegister(i)); |
386 } | 386 } |
387 ExternalReference miss = descriptor->miss_handler(); | 387 ExternalReference miss = descriptor->miss_handler(); |
388 __ CallExternalReference(miss, param_count); | 388 __ CallExternalReference(miss, param_count); |
389 } | 389 } |
390 | 390 |
(...skipping 93 matching lines...) |
484 exponent, | 484 exponent, |
485 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 485 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
486 __ Ret(); | 486 __ Ret(); |
487 } | 487 } |
488 | 488 |
489 | 489 |
490 void DoubleToIStub::Generate(MacroAssembler* masm) { | 490 void DoubleToIStub::Generate(MacroAssembler* masm) { |
491 Label out_of_range, only_low, negate, done; | 491 Label out_of_range, only_low, negate, done; |
492 Register input_reg = source(); | 492 Register input_reg = source(); |
493 Register result_reg = destination(); | 493 Register result_reg = destination(); |
494 ASSERT(is_truncating()); | 494 DCHECK(is_truncating()); |
495 | 495 |
496 int double_offset = offset(); | 496 int double_offset = offset(); |
497 // Account for saved regs if input is sp. | 497 // Account for saved regs if input is sp. |
498 if (input_reg.is(sp)) double_offset += 3 * kPointerSize; | 498 if (input_reg.is(sp)) double_offset += 3 * kPointerSize; |
499 | 499 |
500 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg); | 500 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg); |
501 Register scratch_low = | 501 Register scratch_low = |
502 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | 502 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); |
503 Register scratch_high = | 503 Register scratch_high = |
504 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low); | 504 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low); |
(...skipping 111 matching lines...) |
616 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | 616 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
617 __ mov(scratch_, Operand(non_smi_exponent)); | 617 __ mov(scratch_, Operand(non_smi_exponent)); |
618 // Set the sign bit in scratch_ if the value was negative. | 618 // Set the sign bit in scratch_ if the value was negative. |
619 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); | 619 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); |
620 // Subtract from 0 if the value was negative. | 620 // Subtract from 0 if the value was negative. |
621 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs); | 621 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs); |
622 // We should be masking the implicit first digit of the mantissa away here, | 622 // We should be masking the implicit first digit of the mantissa away here, |
623 // but it just ends up combining harmlessly with the last digit of the | 623 // but it just ends up combining harmlessly with the last digit of the |
624 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 624 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
625 // the most significant 1 to hit the last bit of the 12-bit sign and exponent. | 625 // the most significant 1 to hit the last bit of the 12-bit sign and exponent. |
626 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 626 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
627 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 627 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
628 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); | 628 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); |
629 __ str(scratch_, FieldMemOperand(the_heap_number_, | 629 __ str(scratch_, FieldMemOperand(the_heap_number_, |
630 HeapNumber::kExponentOffset)); | 630 HeapNumber::kExponentOffset)); |
631 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); | 631 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); |
632 __ str(scratch_, FieldMemOperand(the_heap_number_, | 632 __ str(scratch_, FieldMemOperand(the_heap_number_, |
633 HeapNumber::kMantissaOffset)); | 633 HeapNumber::kMantissaOffset)); |
634 __ Ret(); | 634 __ Ret(); |
635 | 635 |
636 __ bind(&max_negative_int); | 636 __ bind(&max_negative_int); |
(...skipping 110 matching lines...) |
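[Reviewer note] A host-side sketch of the fast path above may help readers following the bit manipulation (not V8 code; it assumes the standard IEEE-754 double constants kExponentBias == 1023 and kExponentShift == 20, with shift_distance == 10 as computed above). The path is only reached for integers too large for a 31-bit Smi, so |value| >= 2^30: bit 30 of the magnitude is exactly the implicit leading 1 of the mantissa, which is why the exponent can be fixed at 30 and the leading 1 merges harmlessly with the exponent's low bit.

    // Sketch only, not V8 code; constants are assumptions stated above.
    // INT32_MIN takes the max_negative_int path in the stub and is excluded.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double Int31ToDouble(int32_t value) {
      uint32_t magnitude = value < 0 ? 0u - (uint32_t)value : (uint32_t)value;
      assert(magnitude >= (1u << 30) && magnitude < (1u << 31));
      uint32_t high = (1023u + 30u) << 20;  // non_smi_exponent
      if (value < 0) high |= 0x80000000u;   // HeapNumber::kSignMask
      high |= magnitude >> 10;              // implicit 1 merges with exponent LSB
      uint32_t low = magnitude << 22;       // 32 - shift_distance
      uint64_t bits = ((uint64_t)high << 32) | low;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }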
747 } | 747 } |
748 | 748 |
749 | 749 |
750 // See comment at call site. | 750 // See comment at call site. |
751 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 751 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
752 Register lhs, | 752 Register lhs, |
753 Register rhs, | 753 Register rhs, |
754 Label* lhs_not_nan, | 754 Label* lhs_not_nan, |
755 Label* slow, | 755 Label* slow, |
756 bool strict) { | 756 bool strict) { |
757 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 757 DCHECK((lhs.is(r0) && rhs.is(r1)) || |
758 (lhs.is(r1) && rhs.is(r0))); | 758 (lhs.is(r1) && rhs.is(r0))); |
759 | 759 |
760 Label rhs_is_smi; | 760 Label rhs_is_smi; |
761 __ JumpIfSmi(rhs, &rhs_is_smi); | 761 __ JumpIfSmi(rhs, &rhs_is_smi); |
762 | 762 |
763 // Lhs is a Smi. Check whether the rhs is a heap number. | 763 // Lhs is a Smi. Check whether the rhs is a heap number. |
764 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); | 764 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); |
765 if (strict) { | 765 if (strict) { |
766 // If rhs is not a number and lhs is a Smi then strict equality cannot | 766 // If rhs is not a number and lhs is a Smi then strict equality cannot |
767 // succeed. Return non-equal. | 767 // succeed. Return non-equal. |
(...skipping 41 matching lines...) |
809 // Convert rhs to a double in d6. | 809 // Convert rhs to a double in d6. |
810 __ SmiToDouble(d6, rhs); | 810 __ SmiToDouble(d6, rhs); |
811 // Fall through to both_loaded_as_doubles. | 811 // Fall through to both_loaded_as_doubles. |
812 } | 812 } |
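[Reviewer note] For readers unfamiliar with the tagging scheme that JumpIfSmi and SmiToDouble rely on above, a minimal host-side sketch, assuming the 32-bit layout (kSmiTag == 0, kSmiTagSize == 1): a Smi stores its value shifted left by one, so the low bit doubles as the type tag.

    // Minimal sketch of 32-bit Smi tagging; not V8 code.
    #include <cassert>
    #include <cstdint>

    bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }  // kSmiTagMask test
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }  // arithmetic shift

    int main() {
      int32_t five = SmiTag(5);
      assert(IsSmi(five) && SmiUntag(five) == 5);
      // __ JumpIfSmi(rhs, &rhs_is_smi) is exactly this tag-bit test on rhs.
      return 0;
    }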
813 | 813 |
814 | 814 |
815 // See comment at call site. | 815 // See comment at call site. |
816 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 816 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
817 Register lhs, | 817 Register lhs, |
818 Register rhs) { | 818 Register rhs) { |
819 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 819 DCHECK((lhs.is(r0) && rhs.is(r1)) || |
820 (lhs.is(r1) && rhs.is(r0))); | 820 (lhs.is(r1) && rhs.is(r0))); |
821 | 821 |
822 // If either operand is a JS object or an oddball value, then they are | 822 // If either operand is a JS object or an oddball value, then they are |
823 // not equal since their pointers are different. | 823 // not equal since their pointers are different. |
824 // There is no test for undetectability in strict equality. | 824 // There is no test for undetectability in strict equality. |
825 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 825 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
826 Label first_non_object; | 826 Label first_non_object; |
827 // Get the type of the first operand into r2 and compare it with | 827 // Get the type of the first operand into r2 and compare it with |
828 // FIRST_SPEC_OBJECT_TYPE. | 828 // FIRST_SPEC_OBJECT_TYPE. |
829 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE); | 829 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE); |
(...skipping 25 matching lines...) |
855 } | 855 } |
856 | 856 |
857 | 857 |
858 // See comment at call site. | 858 // See comment at call site. |
859 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | 859 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
860 Register lhs, | 860 Register lhs, |
861 Register rhs, | 861 Register rhs, |
862 Label* both_loaded_as_doubles, | 862 Label* both_loaded_as_doubles, |
863 Label* not_heap_numbers, | 863 Label* not_heap_numbers, |
864 Label* slow) { | 864 Label* slow) { |
865 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 865 DCHECK((lhs.is(r0) && rhs.is(r1)) || |
866 (lhs.is(r1) && rhs.is(r0))); | 866 (lhs.is(r1) && rhs.is(r0))); |
867 | 867 |
868 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 868 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
869 __ b(ne, not_heap_numbers); | 869 __ b(ne, not_heap_numbers); |
870 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 870 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
871 __ cmp(r2, r3); | 871 __ cmp(r2, r3); |
872 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 872 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
873 | 873 |
874 // Both are heap numbers. Load them up then jump to the code we have | 874 // Both are heap numbers. Load them up then jump to the code we have |
875 // for that. | 875 // for that. |
876 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); | 876 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); |
877 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); | 877 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); |
878 __ jmp(both_loaded_as_doubles); | 878 __ jmp(both_loaded_as_doubles); |
879 } | 879 } |
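[Reviewer note] On the HeapNumber::kValueOffset - kHeapObjectTag operands in the vldr instructions above: tagged heap pointers carry kHeapObjectTag (1) in their low bits, so field accesses subtract the tag instead of clearing it first. A hedged sketch, with the field offset left as a parameter because the exact object layout is not shown in this hunk:

    // Sketch of tag-folding field access; the offset value is an assumption
    // supplied by the caller.
    #include <cstdint>
    #include <cstring>

    double LoadHeapNumberValue(uintptr_t tagged_ptr, int value_offset) {
      const int kHeapObjectTag = 1;  // low bit set on all heap pointers
      double value;
      std::memcpy(&value,
                  reinterpret_cast<const void*>(tagged_ptr + value_offset -
                                                kHeapObjectTag),
                  sizeof value);
      return value;
    }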
880 | 880 |
881 | 881 |
882 // Fast negative check for internalized-to-internalized equality. | 882 // Fast negative check for internalized-to-internalized equality. |
883 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 883 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
884 Register lhs, | 884 Register lhs, |
885 Register rhs, | 885 Register rhs, |
886 Label* possible_strings, | 886 Label* possible_strings, |
887 Label* not_both_strings) { | 887 Label* not_both_strings) { |
888 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 888 DCHECK((lhs.is(r0) && rhs.is(r1)) || |
889 (lhs.is(r1) && rhs.is(r0))); | 889 (lhs.is(r1) && rhs.is(r0))); |
890 | 890 |
891 // r2 is object type of rhs. | 891 // r2 is object type of rhs. |
892 Label object_test; | 892 Label object_test; |
893 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 893 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
894 __ tst(r2, Operand(kIsNotStringMask)); | 894 __ tst(r2, Operand(kIsNotStringMask)); |
895 __ b(ne, &object_test); | 895 __ b(ne, &object_test); |
896 __ tst(r2, Operand(kIsNotInternalizedMask)); | 896 __ tst(r2, Operand(kIsNotInternalizedMask)); |
897 __ b(ne, possible_strings); | 897 __ b(ne, possible_strings); |
898 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); | 898 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); |
(...skipping 69 matching lines...) |
968 // NOTICE! This code is only reached after a smi-fast-case check, so | 968 // NOTICE! This code is only reached after a smi-fast-case check, so |
969 // it is certain that at least one operand isn't a smi. | 969 // it is certain that at least one operand isn't a smi. |
970 | 970 |
971 // Handle the case where the objects are identical. Either returns the answer | 971 // Handle the case where the objects are identical. Either returns the answer |
972 // or goes to slow. Only falls through if the objects were not identical. | 972 // or goes to slow. Only falls through if the objects were not identical. |
973 EmitIdenticalObjectComparison(masm, &slow, cc); | 973 EmitIdenticalObjectComparison(masm, &slow, cc); |
974 | 974 |
975 // If either is a Smi (we know that not both are), then they can only | 975 // If either is a Smi (we know that not both are), then they can only |
976 // be strictly equal if the other is a HeapNumber. | 976 // be strictly equal if the other is a HeapNumber. |
977 STATIC_ASSERT(kSmiTag == 0); | 977 STATIC_ASSERT(kSmiTag == 0); |
978 ASSERT_EQ(0, Smi::FromInt(0)); | 978 DCHECK_EQ(0, Smi::FromInt(0)); |
979 __ and_(r2, lhs, Operand(rhs)); | 979 __ and_(r2, lhs, Operand(rhs)); |
980 __ JumpIfNotSmi(r2, ¬_smis); | 980 __ JumpIfNotSmi(r2, ¬_smis); |
981 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 981 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
982 // 1) Return the answer. | 982 // 1) Return the answer. |
983 // 2) Go to slow. | 983 // 2) Go to slow. |
984 // 3) Fall through to both_loaded_as_doubles. | 984 // 3) Fall through to both_loaded_as_doubles. |
985 // 4) Jump to lhs_not_nan. | 985 // 4) Jump to lhs_not_nan. |
986 // In cases 3 and 4 we have found that we are dealing with a number-number | 986 // In cases 3 and 4 we have found that we are dealing with a number-number |
987 // comparison. If VFP3 is supported the double values of the numbers have | 987 // comparison. If VFP3 is supported the double values of the numbers have |
988 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 988 // been loaded into d7 and d6. Otherwise, the double values have been loaded |
(...skipping 91 matching lines...) |
1080 // Figure out which native to call and setup the arguments. | 1080 // Figure out which native to call and setup the arguments. |
1081 Builtins::JavaScript native; | 1081 Builtins::JavaScript native; |
1082 if (cc == eq) { | 1082 if (cc == eq) { |
1083 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 1083 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
1084 } else { | 1084 } else { |
1085 native = Builtins::COMPARE; | 1085 native = Builtins::COMPARE; |
1086 int ncr; // NaN compare result | 1086 int ncr; // NaN compare result |
1087 if (cc == lt || cc == le) { | 1087 if (cc == lt || cc == le) { |
1088 ncr = GREATER; | 1088 ncr = GREATER; |
1089 } else { | 1089 } else { |
1090 ASSERT(cc == gt || cc == ge); // remaining cases | 1090 DCHECK(cc == gt || cc == ge); // remaining cases |
1091 ncr = LESS; | 1091 ncr = LESS; |
1092 } | 1092 } |
1093 __ mov(r0, Operand(Smi::FromInt(ncr))); | 1093 __ mov(r0, Operand(Smi::FromInt(ncr))); |
1094 __ push(r0); | 1094 __ push(r0); |
1095 } | 1095 } |
1096 | 1096 |
1097 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1097 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
1098 // tagged as a small integer. | 1098 // tagged as a small integer. |
1099 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1099 __ InvokeBuiltin(native, JUMP_FUNCTION); |
1100 | 1100 |
(...skipping 197 matching lines...) |
1298 __ bind(&call_runtime); | 1298 __ bind(&call_runtime); |
1299 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); | 1299 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); |
1300 | 1300 |
1301 // The stub is called from non-optimized code, which expects the result | 1301 // The stub is called from non-optimized code, which expects the result |
1302 // as a heap number in the exponent register. | 1302 // as a heap number in the exponent register. |
1303 __ bind(&done); | 1303 __ bind(&done); |
1304 __ AllocateHeapNumber( | 1304 __ AllocateHeapNumber( |
1305 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); | 1305 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); |
1306 __ vstr(double_result, | 1306 __ vstr(double_result, |
1307 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 1307 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
1308 ASSERT(heapnumber.is(r0)); | 1308 DCHECK(heapnumber.is(r0)); |
1309 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | 1309 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); |
1310 __ Ret(2); | 1310 __ Ret(2); |
1311 } else { | 1311 } else { |
1312 __ push(lr); | 1312 __ push(lr); |
1313 { | 1313 { |
1314 AllowExternalCallThatCantCauseGC scope(masm); | 1314 AllowExternalCallThatCantCauseGC scope(masm); |
1315 __ PrepareCallCFunction(0, 2, scratch); | 1315 __ PrepareCallCFunction(0, 2, scratch); |
1316 __ MovToFloatParameters(double_base, double_exponent); | 1316 __ MovToFloatParameters(double_base, double_exponent); |
1317 __ CallCFunction( | 1317 __ CallCFunction( |
1318 ExternalReference::power_double_double_function(isolate()), | 1318 ExternalReference::power_double_double_function(isolate()), |
(...skipping 79 matching lines...) |
1398 // r5: pointer to builtin function (C callee-saved) | 1398 // r5: pointer to builtin function (C callee-saved) |
1399 | 1399 |
1400 // Result returned in r0 or r0+r1 by default. | 1400 // Result returned in r0 or r0+r1 by default. |
1401 | 1401 |
1402 #if V8_HOST_ARCH_ARM | 1402 #if V8_HOST_ARCH_ARM |
1403 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 1403 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
1404 int frame_alignment_mask = frame_alignment - 1; | 1404 int frame_alignment_mask = frame_alignment - 1; |
1405 if (FLAG_debug_code) { | 1405 if (FLAG_debug_code) { |
1406 if (frame_alignment > kPointerSize) { | 1406 if (frame_alignment > kPointerSize) { |
1407 Label alignment_as_expected; | 1407 Label alignment_as_expected; |
1408 ASSERT(IsPowerOf2(frame_alignment)); | 1408 DCHECK(IsPowerOf2(frame_alignment)); |
1409 __ tst(sp, Operand(frame_alignment_mask)); | 1409 __ tst(sp, Operand(frame_alignment_mask)); |
1410 __ b(eq, &alignment_as_expected); | 1410 __ b(eq, &alignment_as_expected); |
1411 // Don't use Check here, as it will call Runtime_Abort re-entering here. | 1411 // Don't use Check here, as it will call Runtime_Abort re-entering here. |
1412 __ stop("Unexpected alignment"); | 1412 __ stop("Unexpected alignment"); |
1413 __ bind(&alignment_as_expected); | 1413 __ bind(&alignment_as_expected); |
1414 } | 1414 } |
1415 } | 1415 } |
1416 #endif | 1416 #endif |
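[Reviewer note] The debug check above is the standard power-of-two alignment identity; a one-function sketch of what the tst against frame_alignment_mask computes:

    // Sketch: sp is A-aligned iff (sp & (A - 1)) == 0, for power-of-two A.
    #include <cassert>
    #include <cstdint>

    bool IsActivationFrameAligned(uintptr_t sp, uintptr_t frame_alignment) {
      assert((frame_alignment & (frame_alignment - 1)) == 0);  // IsPowerOf2
      return (sp & (frame_alignment - 1)) == 0;  // tst sp, frame_alignment_mask
    }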
1417 | 1417 |
1418 // Call C built-in. | 1418 // Call C built-in. |
(...skipping 255 matching lines...) |
1674 // Uses registers r0 to r4. | 1674 // Uses registers r0 to r4. |
1675 // Expected input (depending on whether args are in registers or on the stack): | 1675 // Expected input (depending on whether args are in registers or on the stack): |
1676 // * object: r0 or at sp + 1 * kPointerSize. | 1676 // * object: r0 or at sp + 1 * kPointerSize. |
1677 // * function: r1 or at sp. | 1677 // * function: r1 or at sp. |
1678 // | 1678 // |
1679 // An inlined call site may have been generated before calling this stub. | 1679 // An inlined call site may have been generated before calling this stub. |
1680 // In this case the offsets to the inline sites to patch are passed in r5 and r6. | 1680 // In this case the offsets to the inline sites to patch are passed in r5 and r6. |
1681 // (See LCodeGen::DoInstanceOfKnownGlobal) | 1681 // (See LCodeGen::DoInstanceOfKnownGlobal) |
1682 void InstanceofStub::Generate(MacroAssembler* masm) { | 1682 void InstanceofStub::Generate(MacroAssembler* masm) { |
1683 // Call site inlining and patching implies arguments in registers. | 1683 // Call site inlining and patching implies arguments in registers. |
1684 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 1684 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
1685 | 1685 |
1686 // Fixed register usage throughout the stub: | 1686 // Fixed register usage throughout the stub: |
1687 const Register object = r0; // Object (lhs). | 1687 const Register object = r0; // Object (lhs). |
1688 Register map = r3; // Map of the object. | 1688 Register map = r3; // Map of the object. |
1689 const Register function = r1; // Function (rhs). | 1689 const Register function = r1; // Function (rhs). |
1690 const Register prototype = r4; // Prototype of the function. | 1690 const Register prototype = r4; // Prototype of the function. |
1691 const Register scratch = r2; | 1691 const Register scratch = r2; |
1692 | 1692 |
1693 Label slow, loop, is_instance, is_not_instance, not_js_object; | 1693 Label slow, loop, is_instance, is_not_instance, not_js_object; |
1694 | 1694 |
(...skipping 26 matching lines...) |
1721 // Check that the function prototype is a JS object. | 1721 // Check that the function prototype is a JS object. |
1722 __ JumpIfSmi(prototype, &slow); | 1722 __ JumpIfSmi(prototype, &slow); |
1723 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 1723 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
1724 | 1724 |
1725 // Update the global instanceof or call site inlined cache with the current | 1725 // Update the global instanceof or call site inlined cache with the current |
1726 // map and function. The cached answer will be set when it is known below. | 1726 // map and function. The cached answer will be set when it is known below. |
1727 if (!HasCallSiteInlineCheck()) { | 1727 if (!HasCallSiteInlineCheck()) { |
1728 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1728 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
1729 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 1729 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
1730 } else { | 1730 } else { |
1731 ASSERT(HasArgsInRegisters()); | 1731 DCHECK(HasArgsInRegisters()); |
1732 // Patch the (relocated) inlined map check. | 1732 // Patch the (relocated) inlined map check. |
1733 | 1733 |
1734 // The map_load_offset was stored in r5 | 1734 // The map_load_offset was stored in r5 |
1735 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 1735 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). |
1736 const Register map_load_offset = r5; | 1736 const Register map_load_offset = r5; |
1737 __ sub(r9, lr, map_load_offset); | 1737 __ sub(r9, lr, map_load_offset); |
1738 // Get the map location in r5 and patch it. | 1738 // Get the map location in r5 and patch it. |
1739 __ GetRelocatedValueLocation(r9, map_load_offset, scratch); | 1739 __ GetRelocatedValueLocation(r9, map_load_offset, scratch); |
1740 __ ldr(map_load_offset, MemOperand(map_load_offset)); | 1740 __ ldr(map_load_offset, MemOperand(map_load_offset)); |
1741 __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset)); | 1741 __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset)); |
(...skipping 948 matching lines...) |
2690 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 2690 static void GenerateRecordCallTarget(MacroAssembler* masm) { |
2691 // Cache the called function in a feedback vector slot. Cache states | 2691 // Cache the called function in a feedback vector slot. Cache states |
2692 // are uninitialized, monomorphic (indicated by a JSFunction), and | 2692 // are uninitialized, monomorphic (indicated by a JSFunction), and |
2693 // megamorphic. | 2693 // megamorphic. |
2694 // r0 : number of arguments to the construct function | 2694 // r0 : number of arguments to the construct function |
2695 // r1 : the function to call | 2695 // r1 : the function to call |
2696 // r2 : Feedback vector | 2696 // r2 : Feedback vector |
2697 // r3 : slot in feedback vector (Smi) | 2697 // r3 : slot in feedback vector (Smi) |
2698 Label initialize, done, miss, megamorphic, not_array_function; | 2698 Label initialize, done, miss, megamorphic, not_array_function; |
2699 | 2699 |
2700 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 2700 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), |
2701 masm->isolate()->heap()->megamorphic_symbol()); | 2701 masm->isolate()->heap()->megamorphic_symbol()); |
2702 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 2702 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), |
2703 masm->isolate()->heap()->uninitialized_symbol()); | 2703 masm->isolate()->heap()->uninitialized_symbol()); |
2704 | 2704 |
2705 // Load the cache state into r4. | 2705 // Load the cache state into r4. |
2706 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 2706 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); |
2707 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 2707 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); |
2708 | 2708 |
2709 // A monomorphic cache hit or an already megamorphic state: invoke the | 2709 // A monomorphic cache hit or an already megamorphic state: invoke the |
2710 // function without changing the state. | 2710 // function without changing the state. |
2711 __ cmp(r4, r1); | 2711 __ cmp(r4, r1); |
2712 __ b(eq, &done); | 2712 __ b(eq, &done); |
(...skipping 446 matching lines...) |
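[Reviewer note] On the Operand::PointerOffsetFromSmiKey(r3) addressing in GenerateRecordCallTarget above: the slot index arrives as a Smi, i.e. already shifted left by one, and a pointer on 32-bit ARM is four bytes, so one further left shift turns the Smi directly into a byte offset. A sketch under those assumptions:

    // Sketch; assumes 32-bit Smi tagging and kPointerSize == 4.
    #include <cstdint>

    uint32_t PointerOffsetFromSmiKey(int32_t smi_slot) {
      return (uint32_t)smi_slot << 1;  // (index << 1) << 1 == index * 4
    }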
3159 result_, | 3159 result_, |
3160 Heap::kHeapNumberMapRootIndex, | 3160 Heap::kHeapNumberMapRootIndex, |
3161 index_not_number_, | 3161 index_not_number_, |
3162 DONT_DO_SMI_CHECK); | 3162 DONT_DO_SMI_CHECK); |
3163 call_helper.BeforeCall(masm); | 3163 call_helper.BeforeCall(masm); |
3164 __ push(object_); | 3164 __ push(object_); |
3165 __ push(index_); // Consumed by runtime conversion function. | 3165 __ push(index_); // Consumed by runtime conversion function. |
3166 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 3166 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
3167 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 3167 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
3168 } else { | 3168 } else { |
3169 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 3169 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
3170 // NumberToSmi discards numbers that are not exact integers. | 3170 // NumberToSmi discards numbers that are not exact integers. |
3171 __ CallRuntime(Runtime::kNumberToSmi, 1); | 3171 __ CallRuntime(Runtime::kNumberToSmi, 1); |
3172 } | 3172 } |
3173 // Save the conversion result before the pop instructions below | 3173 // Save the conversion result before the pop instructions below |
3174 // have a chance to overwrite it. | 3174 // have a chance to overwrite it. |
3175 __ Move(index_, r0); | 3175 __ Move(index_, r0); |
3176 __ pop(object_); | 3176 __ pop(object_); |
3177 // Reload the instance type. | 3177 // Reload the instance type. |
3178 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3178 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
3179 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3179 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
(...skipping 19 matching lines...) |
3199 } | 3199 } |
3200 | 3200 |
3201 | 3201 |
3202 // ------------------------------------------------------------------------- | 3202 // ------------------------------------------------------------------------- |
3203 // StringCharFromCodeGenerator | 3203 // StringCharFromCodeGenerator |
3204 | 3204 |
3205 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 3205 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
3206 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 3206 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
3207 STATIC_ASSERT(kSmiTag == 0); | 3207 STATIC_ASSERT(kSmiTag == 0); |
3208 STATIC_ASSERT(kSmiShiftSize == 0); | 3208 STATIC_ASSERT(kSmiShiftSize == 0); |
3209 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 3209 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
3210 __ tst(code_, | 3210 __ tst(code_, |
3211 Operand(kSmiTagMask | | 3211 Operand(kSmiTagMask | |
3212 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 3212 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
3213 __ b(ne, &slow_case_); | 3213 __ b(ne, &slow_case_); |
3214 | 3214 |
3215 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 3215 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
3216 // At this point the code register contains a smi-tagged ASCII char code. | 3216 // At this point the code register contains a smi-tagged ASCII char code. |
3217 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); | 3217 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); |
3218 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 3218 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
3219 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 3219 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); |
(...skipping 386 matching lines...) |
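[Reviewer note] The single tst in StringCharFromCodeGenerator::GenerateFast above checks two things at once: that code_ is a Smi and that its value fits in a one-byte char code. A host-side sketch, assuming kSmiTagMask == 1, kSmiTagSize == 1 and String::kMaxOneByteCharCode == 0xFF:

    // Sketch of the combined tag-and-range test; the constants are
    // assumptions stated above.
    #include <cstdint>

    bool IsOneByteCharCodeSmi(uint32_t tagged_code) {
      // kSmiTagMask | ((~String::kMaxOneByteCharCode) << kSmiTagSize)
      const uint32_t mask = 1u | (~0xFFu << 1);
      return (tagged_code & mask) == 0;  // zero iff a Smi in [0, 0xFF]
    }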
3606 __ cmp(min_length, Operand::Zero()); | 3606 __ cmp(min_length, Operand::Zero()); |
3607 __ b(eq, &compare_lengths); | 3607 __ b(eq, &compare_lengths); |
3608 | 3608 |
3609 // Compare loop. | 3609 // Compare loop. |
3610 GenerateAsciiCharsCompareLoop(masm, | 3610 GenerateAsciiCharsCompareLoop(masm, |
3611 left, right, min_length, scratch2, scratch4, | 3611 left, right, min_length, scratch2, scratch4, |
3612 &result_not_equal); | 3612 &result_not_equal); |
3613 | 3613 |
3614 // Compare lengths - strings up to min-length are equal. | 3614 // Compare lengths - strings up to min-length are equal. |
3615 __ bind(&compare_lengths); | 3615 __ bind(&compare_lengths); |
3616 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 3616 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
3617 // Use length_delta as result if it's zero. | 3617 // Use length_delta as result if it's zero. |
3618 __ mov(r0, Operand(length_delta), SetCC); | 3618 __ mov(r0, Operand(length_delta), SetCC); |
3619 __ bind(&result_not_equal); | 3619 __ bind(&result_not_equal); |
3620 // Conditionally update the result based on either length_delta or | 3620 // Conditionally update the result based on either length_delta or |
3621 // the last comparison performed in the loop above. | 3621 // the last comparison performed in the loop above. |
3622 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); | 3622 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); |
3623 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); | 3623 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); |
3624 __ Ret(); | 3624 __ Ret(); |
3625 } | 3625 } |
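[Reviewer note] The comparison routine above returns its verdict as a Smi, exploiting the fact that Smi::FromInt(EQUAL) == 0 so a zero length_delta can serve directly as the result. A host-side sketch of the same three-way compare, assuming the usual LESS == -1, EQUAL == 0, GREATER == 1 convention:

    // Sketch; the enum values are assumptions matching the DCHECK above.
    #include <cstddef>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    CompareResult CompareFlatAscii(const char* left, size_t left_len,
                                   const char* right, size_t right_len) {
      size_t min_length = left_len < right_len ? left_len : right_len;
      for (size_t i = 0; i < min_length; ++i) {  // the compare loop
        if (left[i] != right[i])
          return left[i] < right[i] ? LESS : GREATER;
      }
      // Equal up to min_length: the length difference decides (length_delta).
      if (left_len == right_len) return EQUAL;
      return left_len < right_len ? LESS : GREATER;
    }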
3626 | 3626 |
(...skipping 91 matching lines...) |
3718 } | 3718 } |
3719 | 3719 |
3720 // Tail call into the stub that handles binary operations with allocation | 3720 // Tail call into the stub that handles binary operations with allocation |
3721 // sites. | 3721 // sites. |
3722 BinaryOpWithAllocationSiteStub stub(isolate(), state_); | 3722 BinaryOpWithAllocationSiteStub stub(isolate(), state_); |
3723 __ TailCallStub(&stub); | 3723 __ TailCallStub(&stub); |
3724 } | 3724 } |
3725 | 3725 |
3726 | 3726 |
3727 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 3727 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
3728 ASSERT(state_ == CompareIC::SMI); | 3728 DCHECK(state_ == CompareIC::SMI); |
3729 Label miss; | 3729 Label miss; |
3730 __ orr(r2, r1, r0); | 3730 __ orr(r2, r1, r0); |
3731 __ JumpIfNotSmi(r2, &miss); | 3731 __ JumpIfNotSmi(r2, &miss); |
3732 | 3732 |
3733 if (GetCondition() == eq) { | 3733 if (GetCondition() == eq) { |
3734 // For equality we do not care about the sign of the result. | 3734 // For equality we do not care about the sign of the result. |
3735 __ sub(r0, r0, r1, SetCC); | 3735 __ sub(r0, r0, r1, SetCC); |
3736 } else { | 3736 } else { |
3737 // Untag before subtracting to avoid handling overflow. | 3737 // Untag before subtracting to avoid handling overflow. |
3738 __ SmiUntag(r1); | 3738 __ SmiUntag(r1); |
3739 __ sub(r0, r1, Operand::SmiUntag(r0)); | 3739 __ sub(r0, r1, Operand::SmiUntag(r0)); |
3740 } | 3740 } |
3741 __ Ret(); | 3741 __ Ret(); |
3742 | 3742 |
3743 __ bind(&miss); | 3743 __ bind(&miss); |
3744 GenerateMiss(masm); | 3744 GenerateMiss(masm); |
3745 } | 3745 } |
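[Reviewer note] The asymmetry in GenerateSmis above is deliberate: for equality only zero versus non-zero matters, so subtracting the tagged values is safe, but an ordered result needs a trustworthy sign, and subtracting two tagged Smis (which span the full int32 range) can overflow. Untagging first leaves 31-bit operands whose difference always fits; a sketch assuming 32-bit tagging:

    // Sketch of the ordered path; not V8 code.
    #include <cstdint>

    int32_t CompareSmisForOrder(int32_t lhs_tagged, int32_t rhs_tagged) {
      int32_t lhs = lhs_tagged >> 1;  // __ SmiUntag(r1)
      int32_t rhs = rhs_tagged >> 1;  // Operand::SmiUntag(r0)
      return lhs - rhs;               // 31-bit operands: the subtract cannot overflow
    }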
3746 | 3746 |
3747 | 3747 |
3748 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 3748 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
3749 ASSERT(state_ == CompareIC::NUMBER); | 3749 DCHECK(state_ == CompareIC::NUMBER); |
3750 | 3750 |
3751 Label generic_stub; | 3751 Label generic_stub; |
3752 Label unordered, maybe_undefined1, maybe_undefined2; | 3752 Label unordered, maybe_undefined1, maybe_undefined2; |
3753 Label miss; | 3753 Label miss; |
3754 | 3754 |
3755 if (left_ == CompareIC::SMI) { | 3755 if (left_ == CompareIC::SMI) { |
3756 __ JumpIfNotSmi(r1, &miss); | 3756 __ JumpIfNotSmi(r1, &miss); |
3757 } | 3757 } |
3758 if (right_ == CompareIC::SMI) { | 3758 if (right_ == CompareIC::SMI) { |
3759 __ JumpIfNotSmi(r0, &miss); | 3759 __ JumpIfNotSmi(r0, &miss); |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3816 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | 3816 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
3817 __ b(eq, &unordered); | 3817 __ b(eq, &unordered); |
3818 } | 3818 } |
3819 | 3819 |
3820 __ bind(&miss); | 3820 __ bind(&miss); |
3821 GenerateMiss(masm); | 3821 GenerateMiss(masm); |
3822 } | 3822 } |
3823 | 3823 |
3824 | 3824 |
3825 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 3825 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
3826 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | 3826 DCHECK(state_ == CompareIC::INTERNALIZED_STRING); |
3827 Label miss; | 3827 Label miss; |
3828 | 3828 |
3829 // Registers containing left and right operands respectively. | 3829 // Registers containing left and right operands respectively. |
3830 Register left = r1; | 3830 Register left = r1; |
3831 Register right = r0; | 3831 Register right = r0; |
3832 Register tmp1 = r2; | 3832 Register tmp1 = r2; |
3833 Register tmp2 = r3; | 3833 Register tmp2 = r3; |
3834 | 3834 |
3835 // Check that both operands are heap objects. | 3835 // Check that both operands are heap objects. |
3836 __ JumpIfEitherSmi(left, right, &miss); | 3836 __ JumpIfEitherSmi(left, right, &miss); |
3837 | 3837 |
3838 // Check that both operands are internalized strings. | 3838 // Check that both operands are internalized strings. |
3839 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 3839 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
3840 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 3840 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
3841 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 3841 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
3842 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 3842 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
3843 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 3843 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
3844 __ orr(tmp1, tmp1, Operand(tmp2)); | 3844 __ orr(tmp1, tmp1, Operand(tmp2)); |
3845 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 3845 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
3846 __ b(ne, &miss); | 3846 __ b(ne, &miss); |
3847 | 3847 |
3848 // Internalized strings are compared by identity. | 3848 // Internalized strings are compared by identity. |
3849 __ cmp(left, right); | 3849 __ cmp(left, right); |
3850 // Make sure r0 is non-zero. At this point input operands are | 3850 // Make sure r0 is non-zero. At this point input operands are |
3851 // guaranteed to be non-zero. | 3851 // guaranteed to be non-zero. |
3852 ASSERT(right.is(r0)); | 3852 DCHECK(right.is(r0)); |
3853 STATIC_ASSERT(EQUAL == 0); | 3853 STATIC_ASSERT(EQUAL == 0); |
3854 STATIC_ASSERT(kSmiTag == 0); | 3854 STATIC_ASSERT(kSmiTag == 0); |
3855 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 3855 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
3856 __ Ret(); | 3856 __ Ret(); |
3857 | 3857 |
3858 __ bind(&miss); | 3858 __ bind(&miss); |
3859 GenerateMiss(masm); | 3859 GenerateMiss(masm); |
3860 } | 3860 } |
3861 | 3861 |
3862 | 3862 |
3863 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 3863 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
3864 ASSERT(state_ == CompareIC::UNIQUE_NAME); | 3864 DCHECK(state_ == CompareIC::UNIQUE_NAME); |
3865 ASSERT(GetCondition() == eq); | 3865 DCHECK(GetCondition() == eq); |
3866 Label miss; | 3866 Label miss; |
3867 | 3867 |
3868 // Registers containing left and right operands respectively. | 3868 // Registers containing left and right operands respectively. |
3869 Register left = r1; | 3869 Register left = r1; |
3870 Register right = r0; | 3870 Register right = r0; |
3871 Register tmp1 = r2; | 3871 Register tmp1 = r2; |
3872 Register tmp2 = r3; | 3872 Register tmp2 = r3; |
3873 | 3873 |
3874 // Check that both operands are heap objects. | 3874 // Check that both operands are heap objects. |
3875 __ JumpIfEitherSmi(left, right, &miss); | 3875 __ JumpIfEitherSmi(left, right, &miss); |
3876 | 3876 |
3877 // Check that both operands are unique names. This leaves the instance | 3877 // Check that both operands are unique names. This leaves the instance |
3878 // types loaded in tmp1 and tmp2. | 3878 // types loaded in tmp1 and tmp2. |
3879 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 3879 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
3880 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 3880 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
3881 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 3881 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
3882 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 3882 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
3883 | 3883 |
3884 __ JumpIfNotUniqueName(tmp1, &miss); | 3884 __ JumpIfNotUniqueName(tmp1, &miss); |
3885 __ JumpIfNotUniqueName(tmp2, &miss); | 3885 __ JumpIfNotUniqueName(tmp2, &miss); |
3886 | 3886 |
3887 // Unique names are compared by identity. | 3887 // Unique names are compared by identity. |
3888 __ cmp(left, right); | 3888 __ cmp(left, right); |
3889 // Make sure r0 is non-zero. At this point input operands are | 3889 // Make sure r0 is non-zero. At this point input operands are |
3890 // guaranteed to be non-zero. | 3890 // guaranteed to be non-zero. |
3891 ASSERT(right.is(r0)); | 3891 DCHECK(right.is(r0)); |
3892 STATIC_ASSERT(EQUAL == 0); | 3892 STATIC_ASSERT(EQUAL == 0); |
3893 STATIC_ASSERT(kSmiTag == 0); | 3893 STATIC_ASSERT(kSmiTag == 0); |
3894 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 3894 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
3895 __ Ret(); | 3895 __ Ret(); |
3896 | 3896 |
3897 __ bind(&miss); | 3897 __ bind(&miss); |
3898 GenerateMiss(masm); | 3898 GenerateMiss(masm); |
3899 } | 3899 } |
3900 | 3900 |
3901 | 3901 |
3902 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 3902 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
3903 ASSERT(state_ == CompareIC::STRING); | 3903 DCHECK(state_ == CompareIC::STRING); |
3904 Label miss; | 3904 Label miss; |
3905 | 3905 |
3906 bool equality = Token::IsEqualityOp(op_); | 3906 bool equality = Token::IsEqualityOp(op_); |
3907 | 3907 |
3908 // Registers containing left and right operands respectively. | 3908 // Registers containing left and right operands respectively. |
3909 Register left = r1; | 3909 Register left = r1; |
3910 Register right = r0; | 3910 Register right = r0; |
3911 Register tmp1 = r2; | 3911 Register tmp1 = r2; |
3912 Register tmp2 = r3; | 3912 Register tmp2 = r3; |
3913 Register tmp3 = r4; | 3913 Register tmp3 = r4; |
(...skipping 19 matching lines...) |
3933 STATIC_ASSERT(kSmiTag == 0); | 3933 STATIC_ASSERT(kSmiTag == 0); |
3934 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 3934 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
3935 __ Ret(eq); | 3935 __ Ret(eq); |
3936 | 3936 |
3937 // Handle not identical strings. | 3937 // Handle not identical strings. |
3938 | 3938 |
3939 // Check that both strings are internalized strings. If they are, we're done | 3939 // Check that both strings are internalized strings. If they are, we're done |
3940 // because we already know they are not identical. We know they are both | 3940 // because we already know they are not identical. We know they are both |
3941 // strings. | 3941 // strings. |
3942 if (equality) { | 3942 if (equality) { |
3943 ASSERT(GetCondition() == eq); | 3943 DCHECK(GetCondition() == eq); |
3944 STATIC_ASSERT(kInternalizedTag == 0); | 3944 STATIC_ASSERT(kInternalizedTag == 0); |
3945 __ orr(tmp3, tmp1, Operand(tmp2)); | 3945 __ orr(tmp3, tmp1, Operand(tmp2)); |
3946 __ tst(tmp3, Operand(kIsNotInternalizedMask)); | 3946 __ tst(tmp3, Operand(kIsNotInternalizedMask)); |
3947 // Make sure r0 is non-zero. At this point input operands are | 3947 // Make sure r0 is non-zero. At this point input operands are |
3948 // guaranteed to be non-zero. | 3948 // guaranteed to be non-zero. |
3949 ASSERT(right.is(r0)); | 3949 DCHECK(right.is(r0)); |
3950 __ Ret(eq); | 3950 __ Ret(eq); |
3951 } | 3951 } |
3952 | 3952 |
3953 // Check that both strings are sequential ASCII. | 3953 // Check that both strings are sequential ASCII. |
3954 Label runtime; | 3954 Label runtime; |
3955 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | 3955 __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
3956 tmp1, tmp2, tmp3, tmp4, &runtime); | 3956 tmp1, tmp2, tmp3, tmp4, &runtime); |
3957 | 3957 |
3958 // Compare flat ASCII strings. Returns when done. | 3958 // Compare flat ASCII strings. Returns when done. |
3959 if (equality) { | 3959 if (equality) { |
(...skipping 12 matching lines...) |
3972 } else { | 3972 } else { |
3973 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 3973 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
3974 } | 3974 } |
3975 | 3975 |
3976 __ bind(&miss); | 3976 __ bind(&miss); |
3977 GenerateMiss(masm); | 3977 GenerateMiss(masm); |
3978 } | 3978 } |
3979 | 3979 |
3980 | 3980 |
3981 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 3981 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
3982 ASSERT(state_ == CompareIC::OBJECT); | 3982 DCHECK(state_ == CompareIC::OBJECT); |
3983 Label miss; | 3983 Label miss; |
3984 __ and_(r2, r1, Operand(r0)); | 3984 __ and_(r2, r1, Operand(r0)); |
3985 __ JumpIfSmi(r2, &miss); | 3985 __ JumpIfSmi(r2, &miss); |
3986 | 3986 |
3987 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); | 3987 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); |
3988 __ b(ne, &miss); | 3988 __ b(ne, &miss); |
3989 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); | 3989 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); |
3990 __ b(ne, &miss); | 3990 __ b(ne, &miss); |
3991 | 3991 |
3992 ASSERT(GetCondition() == eq); | 3992 DCHECK(GetCondition() == eq); |
3993 __ sub(r0, r0, Operand(r1)); | 3993 __ sub(r0, r0, Operand(r1)); |
3994 __ Ret(); | 3994 __ Ret(); |
3995 | 3995 |
3996 __ bind(&miss); | 3996 __ bind(&miss); |
3997 GenerateMiss(masm); | 3997 GenerateMiss(masm); |
3998 } | 3998 } |
3999 | 3999 |
4000 | 4000 |
4001 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 4001 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
4002 Label miss; | 4002 Label miss; |
(...skipping 58 matching lines...) |
4061 } | 4061 } |
4062 | 4062 |
4063 | 4063 |
4064 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 4064 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
4065 Label* miss, | 4065 Label* miss, |
4066 Label* done, | 4066 Label* done, |
4067 Register receiver, | 4067 Register receiver, |
4068 Register properties, | 4068 Register properties, |
4069 Handle<Name> name, | 4069 Handle<Name> name, |
4070 Register scratch0) { | 4070 Register scratch0) { |
4071 ASSERT(name->IsUniqueName()); | 4071 DCHECK(name->IsUniqueName()); |
4072 // If the names of the slots in the range from 1 to kProbes - 1 for the | 4072 // If the names of the slots in the range from 1 to kProbes - 1 for the |
4073 // hash value are not equal to the name, and the kProbes-th slot is not | 4073 // hash value are not equal to the name, and the kProbes-th slot is not |
4074 // used (its name is the undefined value), then the hash table is | 4074 // used (its name is the undefined value), then the hash table is |
4075 // guaranteed not to contain the property. This holds even if some slots | 4075 // guaranteed not to contain the property. This holds even if some slots |
4076 // represent deleted properties (their names are the hole value). | 4076 // represent deleted properties (their names are the hole value). |
4077 for (int i = 0; i < kInlinedProbes; i++) { | 4077 for (int i = 0; i < kInlinedProbes; i++) { |
4078 // scratch0 points to properties hash. | 4078 // scratch0 points to properties hash. |
4079 // Compute the masked index: (hash + i + i * i) & mask. | 4079 // Compute the masked index: (hash + i + i * i) & mask. |
4080 Register index = scratch0; | 4080 Register index = scratch0; |
4081 // Capacity is smi 2^n. | 4081 // Capacity is smi 2^n. |
4082 __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); | 4082 __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); |
4083 __ sub(index, index, Operand(1)); | 4083 __ sub(index, index, Operand(1)); |
4084 __ and_(index, index, Operand( | 4084 __ and_(index, index, Operand( |
4085 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); | 4085 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); |
4086 | 4086 |
4087 // Scale the index by multiplying by the entry size. | 4087 // Scale the index by multiplying by the entry size. |
4088 ASSERT(NameDictionary::kEntrySize == 3); | 4088 DCHECK(NameDictionary::kEntrySize == 3); |
4089 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. | 4089 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. |
4090 | 4090 |
4091 Register entity_name = scratch0; | 4091 Register entity_name = scratch0; |
4092 // Having undefined at this place means the name is not contained. | 4092 // Having undefined at this place means the name is not contained. |
4093 ASSERT_EQ(kSmiTagSize, 1); | 4093 DCHECK_EQ(kSmiTagSize, 1); |
4094 Register tmp = properties; | 4094 Register tmp = properties; |
4095 __ add(tmp, properties, Operand(index, LSL, 1)); | 4095 __ add(tmp, properties, Operand(index, LSL, 1)); |
4096 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 4096 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
4097 | 4097 |
4098 ASSERT(!tmp.is(entity_name)); | 4098 DCHECK(!tmp.is(entity_name)); |
4099 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 4099 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
4100 __ cmp(entity_name, tmp); | 4100 __ cmp(entity_name, tmp); |
4101 __ b(eq, done); | 4101 __ b(eq, done); |
4102 | 4102 |
4103 // Load the hole ready for use below: | 4103 // Load the hole ready for use below: |
4104 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 4104 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
4105 | 4105 |
4106 // Stop if we found the property. | 4106 // Stop if we found the property. |
4107 __ cmp(entity_name, Operand(Handle<Name>(name))); | 4107 __ cmp(entity_name, Operand(Handle<Name>(name))); |
4108 __ b(eq, miss); | 4108 __ b(eq, miss); |
(...skipping 35 matching lines...) |
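[Reviewer note] Both lookup paths in NameDictionaryLookupStub use the probe sequence the comments above describe: quadratic probing over a power-of-two table, with the masked index then scaled by three because each entry is three words wide. A host-side sketch under those assumptions:

    // Sketch of the probe computation; not V8 code. The capacity is a
    // Smi 2^n in the stub and is assumed untagged here.
    #include <cstdint>

    uint32_t ProbeEntryIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
      uint32_t mask = capacity - 1;                // capacity is a power of two
      uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
      return index * 3;  // kEntrySize == 3: index + (index << 1)
    }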
4144 // |done| label if a property with the given name is found. Jump to | 4144 // |done| label if a property with the given name is found. Jump to |
4145 // the |miss| label otherwise. | 4145 // the |miss| label otherwise. |
4146 // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 4146 // If lookup was successful |scratch2| will be equal to elements + 4 * index. |
4147 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 4147 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
4148 Label* miss, | 4148 Label* miss, |
4149 Label* done, | 4149 Label* done, |
4150 Register elements, | 4150 Register elements, |
4151 Register name, | 4151 Register name, |
4152 Register scratch1, | 4152 Register scratch1, |
4153 Register scratch2) { | 4153 Register scratch2) { |
4154 ASSERT(!elements.is(scratch1)); | 4154 DCHECK(!elements.is(scratch1)); |
4155 ASSERT(!elements.is(scratch2)); | 4155 DCHECK(!elements.is(scratch2)); |
4156 ASSERT(!name.is(scratch1)); | 4156 DCHECK(!name.is(scratch1)); |
4157 ASSERT(!name.is(scratch2)); | 4157 DCHECK(!name.is(scratch2)); |
4158 | 4158 |
4159 __ AssertName(name); | 4159 __ AssertName(name); |
4160 | 4160 |
4161 // Compute the capacity mask. | 4161 // Compute the capacity mask. |
4162 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 4162 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
4163 __ SmiUntag(scratch1); | 4163 __ SmiUntag(scratch1); |
4164 __ sub(scratch1, scratch1, Operand(1)); | 4164 __ sub(scratch1, scratch1, Operand(1)); |
4165 | 4165 |
4166 // Generate an unrolled loop that performs a few probes before | 4166 // Generate an unrolled loop that performs a few probes before |
4167 // giving up. Measurements done on Gmail indicate that 2 probes | 4167 // giving up. Measurements done on Gmail indicate that 2 probes |
4168 // cover ~93% of loads from dictionaries. | 4168 // cover ~93% of loads from dictionaries. |
4169 for (int i = 0; i < kInlinedProbes; i++) { | 4169 for (int i = 0; i < kInlinedProbes; i++) { |
4170 // Compute the masked index: (hash + i + i * i) & mask. | 4170 // Compute the masked index: (hash + i + i * i) & mask. |
4171 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 4171 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
4172 if (i > 0) { | 4172 if (i > 0) { |
4173 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4173 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
4174 // the hash in a separate instruction. The value hash + i + i * i is right | 4174 // the hash in a separate instruction. The value hash + i + i * i is right |
4175 // shifted in the following 'and' instruction. | 4175 // shifted in the following 'and' instruction. |
4176 ASSERT(NameDictionary::GetProbeOffset(i) < | 4176 DCHECK(NameDictionary::GetProbeOffset(i) < |
4177 1 << (32 - Name::kHashFieldOffset)); | 4177 1 << (32 - Name::kHashFieldOffset)); |
4178 __ add(scratch2, scratch2, Operand( | 4178 __ add(scratch2, scratch2, Operand( |
4179 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4179 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
4180 } | 4180 } |
4181 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); | 4181 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); |
4182 | 4182 |
4183 // Scale the index by multiplying by the element size. | 4183 // Scale the index by multiplying by the element size. |
4184 ASSERT(NameDictionary::kEntrySize == 3); | 4184 DCHECK(NameDictionary::kEntrySize == 3); |
4185 // scratch2 = scratch2 * 3. | 4185 // scratch2 = scratch2 * 3. |
4186 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | 4186 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); |
4187 | 4187 |
4188 // Check if the key is identical to the name. | 4188 // Check if the key is identical to the name. |
4189 __ add(scratch2, elements, Operand(scratch2, LSL, 2)); | 4189 __ add(scratch2, elements, Operand(scratch2, LSL, 2)); |
4190 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 4190 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); |
4191 __ cmp(name, Operand(ip)); | 4191 __ cmp(name, Operand(ip)); |
4192 __ b(eq, done); | 4192 __ b(eq, done); |
4193 } | 4193 } |
4194 | 4194 |
4195 const int spill_mask = | 4195 const int spill_mask = |
4196 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | | 4196 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | |
4197 r3.bit() | r2.bit() | r1.bit() | r0.bit()) & | 4197 r3.bit() | r2.bit() | r1.bit() | r0.bit()) & |
4198 ~(scratch1.bit() | scratch2.bit()); | 4198 ~(scratch1.bit() | scratch2.bit()); |
4199 | 4199 |
4200 __ stm(db_w, sp, spill_mask); | 4200 __ stm(db_w, sp, spill_mask); |
4201 if (name.is(r0)) { | 4201 if (name.is(r0)) { |
4202 ASSERT(!elements.is(r1)); | 4202 DCHECK(!elements.is(r1)); |
4203 __ Move(r1, name); | 4203 __ Move(r1, name); |
4204 __ Move(r0, elements); | 4204 __ Move(r0, elements); |
4205 } else { | 4205 } else { |
4206 __ Move(r0, elements); | 4206 __ Move(r0, elements); |
4207 __ Move(r1, name); | 4207 __ Move(r1, name); |
4208 } | 4208 } |
4209 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 4209 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); |
4210 __ CallStub(&stub); | 4210 __ CallStub(&stub); |
4211 __ cmp(r0, Operand::Zero()); | 4211 __ cmp(r0, Operand::Zero()); |
4212 __ mov(scratch2, Operand(r2)); | 4212 __ mov(scratch2, Operand(r2)); |
(...skipping 35 matching lines...) |
4248 | 4248 |
4249 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 4249 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
4250 | 4250 |
4251 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 4251 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
4252 // Compute the masked index: (hash + i + i * i) & mask. | 4252 // Compute the masked index: (hash + i + i * i) & mask. |
4253 // Capacity is smi 2^n. | 4253 // Capacity is smi 2^n. |
4254 if (i > 0) { | 4254 if (i > 0) { |
4255 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4255 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
4256 // the hash in a separate instruction. The value hash + i + i * i is right | 4256 // the hash in a separate instruction. The value hash + i + i * i is right |
4257 // shifted in the following 'and' instruction. | 4257 // shifted in the following 'and' instruction. |
4258 ASSERT(NameDictionary::GetProbeOffset(i) < | 4258 DCHECK(NameDictionary::GetProbeOffset(i) < |
4259 1 << (32 - Name::kHashFieldOffset)); | 4259 1 << (32 - Name::kHashFieldOffset)); |
4260 __ add(index, hash, Operand( | 4260 __ add(index, hash, Operand( |
4261 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4261 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
4262 } else { | 4262 } else { |
4263 __ mov(index, Operand(hash)); | 4263 __ mov(index, Operand(hash)); |
4264 } | 4264 } |
4265 __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); | 4265 __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); |
4266 | 4266 |
4267 // Scale the index by multiplying by the entry size. | 4267 // Scale the index by multiplying by the entry size. |
4268 ASSERT(NameDictionary::kEntrySize == 3); | 4268 DCHECK(NameDictionary::kEntrySize == 3); |
4269 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. | 4269 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. |
4270 | 4270 |
4271 ASSERT_EQ(kSmiTagSize, 1); | 4271 DCHECK_EQ(kSmiTagSize, 1); |
4272 __ add(index, dictionary, Operand(index, LSL, 2)); | 4272 __ add(index, dictionary, Operand(index, LSL, 2)); |
4273 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4273 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
4274 | 4274 |
4275 // Having undefined at this place means the name is not contained. | 4275 // Having undefined at this place means the name is not contained. |
4276 __ cmp(entry_key, Operand(undefined)); | 4276 __ cmp(entry_key, Operand(undefined)); |
4277 __ b(eq, &not_in_dictionary); | 4277 __ b(eq, &not_in_dictionary); |
4278 | 4278 |
4279 // Stop if found the property. | 4279 // Stop if found the property. |
4280 __ cmp(entry_key, Operand(key)); | 4280 __ cmp(entry_key, Operand(key)); |
4281 __ b(eq, &in_dictionary); | 4281 __ b(eq, &in_dictionary); |
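The loop above is a textbook quadratic probe over a power-of-two table, so the masking AND replaces a modulo; adding GetProbeOffset(i) pre-shifted by kHashShift means the hash field is right-shifted only once, in the and_ that follows. A minimal sketch under those assumptions (ProbeIndex is an illustrative name, not a V8 function):

#include <cstdint>

// Probe i of a table with power-of-two capacity lands at
// (hash + i + i*i) & (capacity - 1); the entry's byte offset is
// then index * kEntrySize (3) * kPointerSize, as computed above.
static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
  uint32_t probe_offset = i + i * i;  // NameDictionary::GetProbeOffset(i)
  return (hash + probe_offset) & (capacity - 1);
}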
(...skipping 67 matching lines...)
4349 __ Ret(); | 4349 __ Ret(); |
4350 | 4350 |
4351 __ bind(&skip_to_incremental_noncompacting); | 4351 __ bind(&skip_to_incremental_noncompacting); |
4352 GenerateIncremental(masm, INCREMENTAL); | 4352 GenerateIncremental(masm, INCREMENTAL); |
4353 | 4353 |
4354 __ bind(&skip_to_incremental_compacting); | 4354 __ bind(&skip_to_incremental_compacting); |
4355 GenerateIncremental(masm, INCREMENTAL_COMPACTION); | 4355 GenerateIncremental(masm, INCREMENTAL_COMPACTION); |
4356 | 4356 |
4357 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. | 4357 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. |
4358 // Will be checked in IncrementalMarking::ActivateGeneratedStub. | 4358 // Will be checked in IncrementalMarking::ActivateGeneratedStub. |
4359 ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); | 4359 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); |
4360 ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); | 4360 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); |
4361 PatchBranchIntoNop(masm, 0); | 4361 PatchBranchIntoNop(masm, 0); |
4362 PatchBranchIntoNop(masm, Assembler::kInstrSize); | 4362 PatchBranchIntoNop(masm, Assembler::kInstrSize); |
4363 } | 4363 } |
4364 | 4364 |
4365 | 4365 |
4366 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 4366 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
4367 regs_.Save(masm); | 4367 regs_.Save(masm); |
4368 | 4368 |
4369 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 4369 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
4370 Label dont_need_remembered_set; | 4370 Label dont_need_remembered_set; |
(...skipping 31 matching lines...)
4402 __ Ret(); | 4402 __ Ret(); |
4403 } | 4403 } |
4404 | 4404 |
4405 | 4405 |
4406 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 4406 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { |
4407 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 4407 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
4408 int argument_count = 3; | 4408 int argument_count = 3; |
4409 __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 4409 __ PrepareCallCFunction(argument_count, regs_.scratch0()); |
4410 Register address = | 4410 Register address = |
4411 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 4411 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
4412 ASSERT(!address.is(regs_.object())); | 4412 DCHECK(!address.is(regs_.object())); |
4413 ASSERT(!address.is(r0)); | 4413 DCHECK(!address.is(r0)); |
4414 __ Move(address, regs_.address()); | 4414 __ Move(address, regs_.address()); |
4415 __ Move(r0, regs_.object()); | 4415 __ Move(r0, regs_.object()); |
4416 __ Move(r1, address); | 4416 __ Move(r1, address); |
4417 __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 4417 __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); |
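The moves above marshal the three C arguments into r0-r2 per the ARM AAPCS calling convention (note the ordering care: if the address already lives in r0, it is parked in a scratch register before r0 is overwritten with the object). A hedged sketch of the C-side shape of the callee, inferred from the register assignments rather than quoted from V8:

// Hypothetical callee: r0 = object written to, r1 = address of the
// written slot, r2 = isolate, matching AAPCS argument registers.
extern "C" void RecordWriteFromCodeSketch(void* object,
                                          void** slot_address,
                                          void* isolate) {
  // A real implementation would inform the incremental marker;
  // this stub exists only to show the calling convention.
  (void)object; (void)slot_address; (void)isolate;
}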
4418 | 4418 |
4419 AllowExternalCallThatCantCauseGC scope(masm); | 4419 AllowExternalCallThatCantCauseGC scope(masm); |
4420 __ CallCFunction( | 4420 __ CallCFunction( |
4421 ExternalReference::incremental_marking_record_write_function(isolate()), | 4421 ExternalReference::incremental_marking_record_write_function(isolate()), |
4422 argument_count); | 4422 argument_count); |
4423 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | 4423 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); |
(...skipping 185 matching lines...)
4609 const RegList kSavedRegs = | 4609 const RegList kSavedRegs = |
4610 1 << 0 | // r0 | 4610 1 << 0 | // r0 |
4611 1 << 1 | // r1 | 4611 1 << 1 | // r1 |
4612 1 << 2 | // r2 | 4612 1 << 2 | // r2 |
4613 1 << 3 | // r3 | 4613 1 << 3 | // r3 |
4614 1 << 5 | // r5 | 4614 1 << 5 | // r5 |
4615 1 << 9; // r9 | 4615 1 << 9; // r9 |
4616 // We also save lr, so the count here is one higher than the mask indicates. | 4616 // We also save lr, so the count here is one higher than the mask indicates. |
4617 const int32_t kNumSavedRegs = 7; | 4617 const int32_t kNumSavedRegs = 7; |
4618 | 4618 |
4619 ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved); | 4619 DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved); |
4620 | 4620 |
4621 // Save all caller-save registers as this may be called from anywhere. | 4621 // Save all caller-save registers as this may be called from anywhere. |
4622 __ stm(db_w, sp, kSavedRegs | lr.bit()); | 4622 __ stm(db_w, sp, kSavedRegs | lr.bit()); |
4623 | 4623 |
4624 // Compute the function's address for the first argument. | 4624 // Compute the function's address for the first argument. |
4625 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); | 4625 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); |
4626 | 4626 |
4627 // The caller's return address is above the saved temporaries. | 4627 // The caller's return address is above the saved temporaries. |
4628 // Grab that for the second argument to the hook. | 4628 // Grab that for the second argument to the hook. |
4629 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); | 4629 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); |
4630 | 4630 |
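The add above skips seven saved words to reach the caller's return address: the six registers in kSavedRegs plus lr, exactly as the earlier comment notes. A quick hedged sanity check of that count in plain C++ (the constants are recomputed here for illustration):

#include <bitset>
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSavedRegs =  // r0, r1, r2, r3, r5, r9
      1 << 0 | 1 << 1 | 1 << 2 | 1 << 3 | 1 << 5 | 1 << 9;
  const uint32_t kLrBit = 1u << 14;  // lr is r14, pushed alongside
  // Six mask bits plus lr account for the seven stacked words.
  assert(std::bitset<32>(kSavedRegs | kLrBit).count() == 7);
  return 0;
}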
4631 // Align the stack if necessary. | 4631 // Align the stack if necessary. |
4632 int frame_alignment = masm->ActivationFrameAlignment(); | 4632 int frame_alignment = masm->ActivationFrameAlignment(); |
4633 if (frame_alignment > kPointerSize) { | 4633 if (frame_alignment > kPointerSize) { |
4634 __ mov(r5, sp); | 4634 __ mov(r5, sp); |
4635 ASSERT(IsPowerOf2(frame_alignment)); | 4635 DCHECK(IsPowerOf2(frame_alignment)); |
4636 __ and_(sp, sp, Operand(-frame_alignment)); | 4636 __ and_(sp, sp, Operand(-frame_alignment)); |
4637 } | 4637 } |
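Since frame_alignment is a power of two, AND-ing sp with its negation rounds the stack pointer down to an aligned address (r5 keeps the original sp for restoring afterwards). A minimal sketch of the arithmetic, with AlignDown as an illustrative name:

#include <cassert>
#include <cstdint>

// For power-of-two a, -a == ~(a - 1) in two's complement, so the
// AND clears the low log2(a) bits, rounding the pointer down.
static uintptr_t AlignDown(uintptr_t sp, uintptr_t a) {
  return sp & ~(a - 1);
}

int main() {
  assert(AlignDown(0x1007, 8) == 0x1000);
  assert(AlignDown(0x1000, 8) == 0x1000);  // already aligned: unchanged
  return 0;
}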
4638 | 4638 |
4639 #if V8_HOST_ARCH_ARM | 4639 #if V8_HOST_ARCH_ARM |
4640 int32_t entry_hook = | 4640 int32_t entry_hook = |
4641 reinterpret_cast<int32_t>(isolate()->function_entry_hook()); | 4641 reinterpret_cast<int32_t>(isolate()->function_entry_hook()); |
4642 __ mov(ip, Operand(entry_hook)); | 4642 __ mov(ip, Operand(entry_hook)); |
4643 #else | 4643 #else |
4644 // Under the simulator we need to indirect the entry hook through a | 4644 // Under the simulator we need to indirect the entry hook through a |
4645 // trampoline function at a known address. | 4645 // trampoline function at a known address. |
(...skipping 43 matching lines...)
4689 | 4689 |
4690 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 4690 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, |
4691 AllocationSiteOverrideMode mode) { | 4691 AllocationSiteOverrideMode mode) { |
4692 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 4692 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) |
4693 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES) | 4693 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES) |
4694 // r0 - number of arguments | 4694 // r0 - number of arguments |
4695 // r1 - constructor? | 4695 // r1 - constructor? |
4696 // sp[0] - last argument | 4696 // sp[0] - last argument |
4697 Label normal_sequence; | 4697 Label normal_sequence; |
4698 if (mode == DONT_OVERRIDE) { | 4698 if (mode == DONT_OVERRIDE) { |
4699 ASSERT(FAST_SMI_ELEMENTS == 0); | 4699 DCHECK(FAST_SMI_ELEMENTS == 0); |
4700 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 4700 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1); |
4701 ASSERT(FAST_ELEMENTS == 2); | 4701 DCHECK(FAST_ELEMENTS == 2); |
4702 ASSERT(FAST_HOLEY_ELEMENTS == 3); | 4702 DCHECK(FAST_HOLEY_ELEMENTS == 3); |
4703 ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 4703 DCHECK(FAST_DOUBLE_ELEMENTS == 4); |
4704 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 4704 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
4705 | 4705 |
4706 // is the low bit set? If so, we are holey and that is good. | 4706 // is the low bit set? If so, we are holey and that is good. |
4707 __ tst(r3, Operand(1)); | 4707 __ tst(r3, Operand(1)); |
4708 __ b(ne, &normal_sequence); | 4708 __ b(ne, &normal_sequence); |
4709 } | 4709 } |
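The DCHECK block pins down the ElementsKind numbering so that every packed kind (even) is immediately followed by its holey variant (odd), which is what makes the single-bit tst a complete holey test. A small sketch under that numbering assumption (the enum is re-declared here for illustration, not included from V8):

// Packed kinds are even, holey kinds odd, per the DCHECKs above.
enum ElementsKindSketch {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

// Equivalent of the "tst r3, #1; b ne" pair above.
static bool IsHoleyFast(int kind) { return (kind & 1) != 0; }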
4710 | 4710 |
4711 // look at the first argument | 4711 // look at the first argument |
4712 __ ldr(r5, MemOperand(sp, 0)); | 4712 __ ldr(r5, MemOperand(sp, 0)); |
4713 __ cmp(r5, Operand::Zero()); | 4713 __ cmp(r5, Operand::Zero()); |
4714 __ b(eq, &normal_sequence); | 4714 __ b(eq, &normal_sequence); |
(...skipping 298 matching lines...)
5013 // Prepare arguments. | 5013 // Prepare arguments. |
5014 __ mov(scratch, sp); | 5014 __ mov(scratch, sp); |
5015 | 5015 |
5016 // Allocate the v8::Arguments structure in the arguments' space since | 5016 // Allocate the v8::Arguments structure in the arguments' space since |
5017 // it's not controlled by GC. | 5017 // it's not controlled by GC. |
5018 const int kApiStackSpace = 4; | 5018 const int kApiStackSpace = 4; |
5019 | 5019 |
5020 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5020 FrameScope frame_scope(masm, StackFrame::MANUAL); |
5021 __ EnterExitFrame(false, kApiStackSpace); | 5021 __ EnterExitFrame(false, kApiStackSpace); |
5022 | 5022 |
5023 ASSERT(!api_function_address.is(r0) && !scratch.is(r0)); | 5023 DCHECK(!api_function_address.is(r0) && !scratch.is(r0)); |
5024 // r0 = FunctionCallbackInfo& | 5024 // r0 = FunctionCallbackInfo& |
5025 // Arguments is after the return address. | 5025 // Arguments is after the return address. |
5026 __ add(r0, sp, Operand(1 * kPointerSize)); | 5026 __ add(r0, sp, Operand(1 * kPointerSize)); |
5027 // FunctionCallbackInfo::implicit_args_ | 5027 // FunctionCallbackInfo::implicit_args_ |
5028 __ str(scratch, MemOperand(r0, 0 * kPointerSize)); | 5028 __ str(scratch, MemOperand(r0, 0 * kPointerSize)); |
5029 // FunctionCallbackInfo::values_ | 5029 // FunctionCallbackInfo::values_ |
5030 __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 5030 __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); |
5031 __ str(ip, MemOperand(r0, 1 * kPointerSize)); | 5031 __ str(ip, MemOperand(r0, 1 * kPointerSize)); |
5032 // FunctionCallbackInfo::length_ = argc | 5032 // FunctionCallbackInfo::length_ = argc |
5033 __ mov(ip, Operand(argc)); | 5033 __ mov(ip, Operand(argc)); |
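The stores above fill three consecutive pointer-sized slots of the FunctionCallbackInfo handed to the API callback. A rough sketch of the layout those offsets imply (field names follow the comments in the code, but this struct is illustrative, not V8's actual definition):

#include <cstdint>

struct FunctionCallbackInfoSketch {
  void** implicit_args_;  // slot 0: start of the FCA block
  void** values_;         // slot 1: last (highest-addressed) argument
  int32_t length_;        // slot 2: argc
};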
(...skipping 58 matching lines...)
5092 MemOperand(fp, 6 * kPointerSize), | 5092 MemOperand(fp, 6 * kPointerSize), |
5093 NULL); | 5093 NULL); |
5094 } | 5094 } |
5095 | 5095 |
5096 | 5096 |
5097 #undef __ | 5097 #undef __ |
5098 | 5098 |
5099 } } // namespace v8::internal | 5099 } } // namespace v8::internal |
5100 | 5100 |
5101 #endif // V8_TARGET_ARCH_ARM | 5101 #endif // V8_TARGET_ARCH_ARM |