Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(134)

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6614010: [Isolates] Merge 6700:7030 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 380 matching lines...) Expand 10 before | Expand all | Expand 10 after
391 // either r0 or r1 is not a number (not smi and not heap number object) the 391 // either r0 or r1 is not a number (not smi and not heap number object) the
392 // not_number label is jumped to with r0 and r1 intact. 392 // not_number label is jumped to with r0 and r1 intact.
393 static void LoadOperands(MacroAssembler* masm, 393 static void LoadOperands(MacroAssembler* masm,
394 FloatingPointHelper::Destination destination, 394 FloatingPointHelper::Destination destination,
395 Register heap_number_map, 395 Register heap_number_map,
396 Register scratch1, 396 Register scratch1,
397 Register scratch2, 397 Register scratch2,
398 Label* not_number); 398 Label* not_number);
399 399
400 // Loads the number from object into dst as a 32-bit integer if possible. If 400 // Loads the number from object into dst as a 32-bit integer if possible. If
401 // the object is not a 32-bit integer control continues at the label 401 // the object cannot be converted to a 32-bit integer control continues at
402 // not_int32. If VFP is supported double_scratch is used but not scratch2. 402 // the label not_int32. If VFP is supported double_scratch is used
403 // but not scratch2.
404 // Floating point values in the 32-bit integer range will be rounded
405 // to an integer.
403 static void LoadNumberAsInteger(MacroAssembler* masm, 406 static void LoadNumberAsInteger(MacroAssembler* masm,
404 Register object, 407 Register object,
405 Register dst, 408 Register dst,
406 Register heap_number_map, 409 Register heap_number_map,
407 Register scratch1, 410 Register scratch1,
408 Register scratch2, 411 Register scratch2,
409 DwVfpRegister double_scratch, 412 DwVfpRegister double_scratch,
410 Label* not_int32); 413 Label* not_int32);
411 414
415 // Load the number from object into double_dst in the double format.
416 // Control will jump to not_int32 if the value cannot be exactly represented
417 // by a 32-bit integer.
418 // Floating point values in the 32-bit integer range that are not exact
419 // integers won't be loaded.
420 static void LoadNumberAsInt32Double(MacroAssembler* masm,
421 Register object,
422 Destination destination,
423 DwVfpRegister double_dst,
424 Register dst1,
425 Register dst2,
426 Register heap_number_map,
427 Register scratch1,
428 Register scratch2,
429 SwVfpRegister single_scratch,
430 Label* not_int32);
431
432 // Loads the number from object into dst as a 32-bit integer.
433 // Control will jump to not_int32 if the object cannot be exactly represented
434 // by a 32-bit integer.
435 // Floating point values in the 32-bit integer range that are not exact
436 // integers won't be converted.
437 // scratch3 is not used when VFP3 is supported.
438 static void LoadNumberAsInt32(MacroAssembler* masm,
439 Register object,
440 Register dst,
441 Register heap_number_map,
442 Register scratch1,
443 Register scratch2,
444 Register scratch3,
445 DwVfpRegister double_scratch,
446 Label* not_int32);
447
448 // Generate non VFP3 code to check if a double can be exactly represented by a
449 // 32-bit integer. This does not check for 0 or -0, which need
450 // to be checked for separately.
451 // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
452 // through otherwise.
453 // src1 and src2 will be clobbered.
454 //
455 // Expected input:
456 // - src1: higher (exponent) part of the double value.
457 // - src2: lower (mantissa) part of the double value.
458 // Output status:
459 // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
460 // - src2: contains 1.
461 // - other registers are clobbered.
462 static void DoubleIs32BitInteger(MacroAssembler* masm,
463 Register src1,
464 Register src2,
465 Register dst,
466 Register scratch,
467 Label* not_int32);
468
469 // Generates code to call a C function to do a double operation using core
470 // registers. (Used when VFP3 is not supported.)
471 // This code never falls through, but returns with a heap number containing
472 // the result in r0.
473 // Register heapnumber_result must be a heap number in which the
474 // result of the operation will be stored.
475 // Requires the following layout on entry:
476 // r0: Left value (least significant part of mantissa).
477 // r1: Left value (sign, exponent, top of mantissa).
478 // r2: Right value (least significant part of mantissa).
479 // r3: Right value (sign, exponent, top of mantissa).
480 static void CallCCodeForDoubleOperation(MacroAssembler* masm,
481 Token::Value op,
482 Register heap_number_result,
483 Register scratch);
484
412 private: 485 private:
413 static void LoadNumber(MacroAssembler* masm, 486 static void LoadNumber(MacroAssembler* masm,
414 FloatingPointHelper::Destination destination, 487 FloatingPointHelper::Destination destination,
415 Register object, 488 Register object,
416 DwVfpRegister dst, 489 DwVfpRegister dst,
417 Register dst1, 490 Register dst1,
418 Register dst2, 491 Register dst2,
419 Register heap_number_map, 492 Register heap_number_map,
420 Register scratch1, 493 Register scratch1,
421 Register scratch2, 494 Register scratch2,
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
554 __ b(ne, not_int32); 627 __ b(ne, not_int32);
555 __ ConvertToInt32( 628 __ ConvertToInt32(
556 object, dst, scratch1, scratch2, double_scratch, not_int32); 629 object, dst, scratch1, scratch2, double_scratch, not_int32);
557 __ jmp(&done); 630 __ jmp(&done);
558 __ bind(&is_smi); 631 __ bind(&is_smi);
559 __ SmiUntag(dst, object); 632 __ SmiUntag(dst, object);
560 __ bind(&done); 633 __ bind(&done);
561 } 634 }
562 635
563 636
637 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
638 Register object,
639 Destination destination,
640 DwVfpRegister double_dst,
641 Register dst1,
642 Register dst2,
643 Register heap_number_map,
644 Register scratch1,
645 Register scratch2,
646 SwVfpRegister single_scratch,
647 Label* not_int32) {
648 ASSERT(!scratch1.is(object) && !scratch2.is(object));
649 ASSERT(!scratch1.is(scratch2));
650 ASSERT(!heap_number_map.is(object) &&
651 !heap_number_map.is(scratch1) &&
652 !heap_number_map.is(scratch2));
653
654 Label done, obj_is_not_smi;
655
656 __ JumpIfNotSmi(object, &obj_is_not_smi);
657 __ SmiUntag(scratch1, object);
658 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
659 CpuFeatures::Scope scope(VFP3);
660 __ vmov(single_scratch, scratch1);
661 __ vcvt_f64_s32(double_dst, single_scratch);
662 if (destination == kCoreRegisters) {
663 __ vmov(dst1, dst2, double_dst);
664 }
665 } else {
666 Label fewer_than_20_useful_bits;
667 // Expected output:
668 // | dst1 | dst2 |
669 // | s | exp | mantissa |
670
671 // Check for zero.
672 __ cmp(scratch1, Operand(0));
673 __ mov(dst1, scratch1);
674 __ mov(dst2, scratch1);
675 __ b(eq, &done);
676
677 // Preload the sign of the value.
678 __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
679 // Get the absolute value of the object (as an unsigned integer).
680 __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
681
682 // Get mantissa[51:20].
683
684 // Get the position of the first set bit.
685 __ CountLeadingZeros(dst2, scratch1, scratch2);
686 __ rsb(dst2, dst2, Operand(31));
687
688 // Set the exponent.
689 __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
690 __ Bfi(dst1, scratch2, scratch2,
691 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
692
693 // Clear the first non null bit.
694 __ mov(scratch2, Operand(1));
695 __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
696
697 __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
698 // Get the number of bits to set in the lower part of the mantissa.
699 __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
700 __ b(mi, &fewer_than_20_useful_bits);
701 // Set the higher 20 bits of the mantissa.
702 __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
703 __ rsb(scratch2, scratch2, Operand(32));
704 __ mov(dst2, Operand(scratch1, LSL, scratch2));
705 __ b(&done);
706
707 __ bind(&fewer_than_20_useful_bits);
708 __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
709 __ mov(scratch2, Operand(scratch1, LSL, scratch2));
710 __ orr(dst1, dst1, scratch2);
711 // Set dst2 to 0.
712 __ mov(dst2, Operand(0));
713 }
714
715 __ b(&done);
716
717 __ bind(&obj_is_not_smi);
718 if (FLAG_debug_code) {
719 __ AbortIfNotRootValue(heap_number_map,
720 Heap::kHeapNumberMapRootIndex,
721 "HeapNumberMap register clobbered.");
722 }
723 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
724
725 // Load the number.
726 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
727 CpuFeatures::Scope scope(VFP3);
728 // Load the double value.
729 __ sub(scratch1, object, Operand(kHeapObjectTag));
730 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
731
732 __ EmitVFPTruncate(kRoundToZero,
733 single_scratch,
734 double_dst,
735 scratch1,
736 scratch2,
737 kCheckForInexactConversion);
738
739 // Jump to not_int32 if the operation did not succeed.
740 __ b(ne, not_int32);
741
742 if (destination == kCoreRegisters) {
743 __ vmov(dst1, dst2, double_dst);
744 }
745
746 } else {
747 ASSERT(!scratch1.is(object) && !scratch2.is(object));
748 // Load the double value in the destination registers.
749 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
750
751 // Check for 0 and -0.
752 __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
753 __ orr(scratch1, scratch1, Operand(dst2));
754 __ cmp(scratch1, Operand(0));
755 __ b(eq, &done);
756
757 // Check that the value can be exactly represented by a 32-bit integer.
758 // Jump to not_int32 if that's not the case.
759 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
760
761 // dst1 and dst2 were trashed. Reload the double value.
762 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
763 }
764
765 __ bind(&done);
766 }
767
768
769 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
770 Register object,
771 Register dst,
772 Register heap_number_map,
773 Register scratch1,
774 Register scratch2,
775 Register scratch3,
776 DwVfpRegister double_scratch,
777 Label* not_int32) {
778 ASSERT(!dst.is(object));
779 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
780 ASSERT(!scratch1.is(scratch2) &&
781 !scratch1.is(scratch3) &&
782 !scratch2.is(scratch3));
783
784 Label done;
785
786 // Untag the object into the destination register.
787 __ SmiUntag(dst, object);
788 // Just return if the object is a smi.
789 __ JumpIfSmi(object, &done);
790
791 if (FLAG_debug_code) {
792 __ AbortIfNotRootValue(heap_number_map,
793 Heap::kHeapNumberMapRootIndex,
794 "HeapNumberMap register clobbered.");
795 }
796 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
797
798 // Object is a heap number.
799 // Convert the floating point value to a 32-bit integer.
800 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
801 CpuFeatures::Scope scope(VFP3);
802 SwVfpRegister single_scratch = double_scratch.low();
803 // Load the double value.
804 __ sub(scratch1, object, Operand(kHeapObjectTag));
805 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
806
807 __ EmitVFPTruncate(kRoundToZero,
808 single_scratch,
809 double_scratch,
810 scratch1,
811 scratch2,
812 kCheckForInexactConversion);
813
814 // Jump to not_int32 if the operation did not succeed.
815 __ b(ne, not_int32);
816 // Get the result in the destination register.
817 __ vmov(dst, single_scratch);
818
819 } else {
820 // Load the double value in the destination registers.
821 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
822 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
823
824 // Check for 0 and -0.
825 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
826 __ orr(dst, scratch2, Operand(dst));
827 __ cmp(dst, Operand(0));
828 __ b(eq, &done);
829
830 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
831
832 // Registers state after DoubleIs32BitInteger.
833 // dst: mantissa[51:20].
834 // scratch2: 1
835
836 // Shift back the higher bits of the mantissa.
837 __ mov(dst, Operand(dst, LSR, scratch3));
838 // Set the implicit first bit.
839 __ rsb(scratch3, scratch3, Operand(32));
840 __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
841 // Set the sign.
842 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
843 __ tst(scratch1, Operand(HeapNumber::kSignMask));
844 __ rsb(dst, dst, Operand(0), LeaveCC, mi);
845 }
846
847 __ bind(&done);
848 }
849
850
851 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
852 Register src1,
853 Register src2,
854 Register dst,
855 Register scratch,
856 Label* not_int32) {
857 // Get exponent alone in scratch.
858 __ Ubfx(scratch,
859 src1,
860 HeapNumber::kExponentShift,
861 HeapNumber::kExponentBits);
862
863 // Subtract the bias from the exponent.
864 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
865
866 // src1: higher (exponent) part of the double value.
867 // src2: lower (mantissa) part of the double value.
868 // scratch: unbiased exponent.
869
870 // Fast cases. Check for obvious non 32-bit integer values.
871 // Negative exponent cannot yield 32-bit integers.
872 __ b(mi, not_int32);
873 // Exponent greater than 31 cannot yield 32-bit integers.
874 // Also, a positive value with an exponent equal to 31 is outside of the
875 // signed 32-bit integer range.
876 __ tst(src1, Operand(HeapNumber::kSignMask));
877 __ cmp(scratch, Operand(30), eq); // Executed for positive. If exponent is 30
878 // the gt condition will be "correct" and
879 // the next instruction will be skipped.
880 __ cmp(scratch, Operand(31), ne); // Executed for negative and positive where
881 // exponent is not 30.
882 __ b(gt, not_int32);
883 // - Bits [21:0] in the mantissa are not null.
884 __ tst(src2, Operand(0x3fffff));
885 __ b(ne, not_int32);
886
887 // Otherwise the exponent needs to be big enough to shift left all the
888 // non zero bits left. So we need the (30 - exponent) last bits of the
889 // 31 higher bits of the mantissa to be null.
890 // Because bits [21:0] are null, we can check instead that the
891 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
892
893 // Get the 32 higher bits of the mantissa in dst.
894 __ Ubfx(dst,
895 src2,
896 HeapNumber::kMantissaBitsInTopWord,
897 32 - HeapNumber::kMantissaBitsInTopWord);
898 __ orr(dst,
899 dst,
900 Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
901
902 // Create the mask and test the lower bits (of the higher bits).
903 __ rsb(scratch, scratch, Operand(32));
904 __ mov(src2, Operand(1));
905 __ mov(src1, Operand(src2, LSL, scratch));
906 __ sub(src1, src1, Operand(1));
907 __ tst(dst, src1);
908 __ b(ne, not_int32);
909 }
910
911
912 void FloatingPointHelper::CallCCodeForDoubleOperation(
913 MacroAssembler* masm,
914 Token::Value op,
915 Register heap_number_result,
916 Register scratch) {
917 // Using core registers:
918 // r0: Left value (least significant part of mantissa).
919 // r1: Left value (sign, exponent, top of mantissa).
920 // r2: Right value (least significant part of mantissa).
921 // r3: Right value (sign, exponent, top of mantissa).
922
923 // Assert that heap_number_result is callee-saved.
924 // We currently always use r5 to pass it.
925 ASSERT(heap_number_result.is(r5));
926
927 // Push the current return address before the C call. Return will be
928 // through pop(pc) below.
929 __ push(lr);
930 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
931 // Call C routine that may not cause GC or other trouble.
932 __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
933 // Store answer in the overwritable heap number.
934 #if !defined(USE_ARM_EABI)
935 // Double returned in fp coprocessor register 0 and 1, encoded as
936 // register cr8. Offsets must be divisible by 4 for coprocessor so we
937 // need to subtract the tag from heap_number_result.
938 __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
939 __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
940 #else
941 // Double returned in registers 0 and 1.
942 __ Strd(r0, r1, FieldMemOperand(heap_number_result,
943 HeapNumber::kValueOffset));
944 #endif
945 // Place heap_number_result in r0 and return to the pushed return address.
946 __ mov(r0, Operand(heap_number_result));
947 __ pop(pc);
948 }
949
564 950
565 // See comment for class. 951 // See comment for class.
566 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { 952 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
567 Label max_negative_int; 953 Label max_negative_int;
568 // the_int_ has the answer which is a signed int32 but not a Smi. 954 // the_int_ has the answer which is a signed int32 but not a Smi.
569 // We test for the special value that has a different exponent. This test 955 // We test for the special value that has a different exponent. This test
570 // has the neat side effect of setting the flags according to the sign. 956 // has the neat side effect of setting the flags according to the sign.
571 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 957 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
572 __ cmp(the_int_, Operand(0x80000000u)); 958 __ cmp(the_int_, Operand(0x80000000u));
573 __ b(eq, &max_negative_int); 959 __ b(eq, &max_negative_int);
(...skipping 716 matching lines...) Expand 10 before | Expand all | Expand 10 after
1290 1676
1291 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 1677 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1292 // tagged as a small integer. 1678 // tagged as a small integer.
1293 __ InvokeBuiltin(native, JUMP_JS); 1679 __ InvokeBuiltin(native, JUMP_JS);
1294 } 1680 }
1295 1681
1296 1682
1297 // This stub does not handle the inlined cases (Smis, Booleans, undefined). 1683 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
1298 // The stub returns zero for false, and a non-zero value for true. 1684 // The stub returns zero for false, and a non-zero value for true.
1299 void ToBooleanStub::Generate(MacroAssembler* masm) { 1685 void ToBooleanStub::Generate(MacroAssembler* masm) {
1686 // This stub uses VFP3 instructions.
1687 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
1688
1300 Label false_result; 1689 Label false_result;
1301 Label not_heap_number; 1690 Label not_heap_number;
1302 Register scratch = r7; 1691 Register scratch = r9.is(tos_) ? r7 : r9;
1303 1692
1304 __ LoadRoot(ip, Heap::kNullValueRootIndex); 1693 __ LoadRoot(ip, Heap::kNullValueRootIndex);
1305 __ cmp(tos_, ip); 1694 __ cmp(tos_, ip);
1306 __ b(eq, &false_result); 1695 __ b(eq, &false_result);
1307 1696
1308 // HeapNumber => false iff +0, -0, or NaN. 1697 // HeapNumber => false iff +0, -0, or NaN.
1309 __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); 1698 __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
1310 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 1699 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
1311 __ cmp(scratch, ip); 1700 __ cmp(scratch, ip);
1312 __ b(&not_heap_number, ne); 1701 __ b(&not_heap_number, ne);
(...skipping 1272 matching lines...) Expand 10 before | Expand all | Expand 10 after
2585 __ Ret(); 2974 __ Ret();
2586 break; 2975 break;
2587 case Token::BIT_AND: 2976 case Token::BIT_AND:
2588 __ and_(right, left, Operand(right)); 2977 __ and_(right, left, Operand(right));
2589 __ Ret(); 2978 __ Ret();
2590 break; 2979 break;
2591 case Token::BIT_XOR: 2980 case Token::BIT_XOR:
2592 __ eor(right, left, Operand(right)); 2981 __ eor(right, left, Operand(right));
2593 __ Ret(); 2982 __ Ret();
2594 break; 2983 break;
2984 case Token::SAR:
2985 // Remove tags from right operand.
2986 __ GetLeastBitsFromSmi(scratch1, right, 5);
2987 __ mov(right, Operand(left, ASR, scratch1));
2988 // Smi tag result.
2989 __ bic(right, right, Operand(kSmiTagMask));
2990 __ Ret();
2991 break;
2992 case Token::SHR:
2993 // Remove tags from operands. We can't do this on a 31 bit number
2994 // because then the 0s get shifted into bit 30 instead of bit 31.
2995 __ SmiUntag(scratch1, left);
2996 __ GetLeastBitsFromSmi(scratch2, right, 5);
2997 __ mov(scratch1, Operand(scratch1, LSR, scratch2));
2998 // Unsigned shift is not allowed to produce a negative number, so
2999 // check the sign bit and the sign bit after Smi tagging.
3000 __ tst(scratch1, Operand(0xc0000000));
3001 __ b(ne, &not_smi_result);
3002 // Smi tag result.
3003 __ SmiTag(right, scratch1);
3004 __ Ret();
3005 break;
3006 case Token::SHL:
3007 // Remove tags from operands.
3008 __ SmiUntag(scratch1, left);
3009 __ GetLeastBitsFromSmi(scratch2, right, 5);
3010 __ mov(scratch1, Operand(scratch1, LSL, scratch2));
3011 // Check that the signed result fits in a Smi.
3012 __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
3013 __ b(mi, &not_smi_result);
3014 __ SmiTag(right, scratch1);
3015 __ Ret();
3016 break;
2595 default: 3017 default:
2596 UNREACHABLE(); 3018 UNREACHABLE();
2597 } 3019 }
2598 __ bind(&not_smi_result); 3020 __ bind(&not_smi_result);
2599 } 3021 }
2600 3022
2601 3023
2602 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, 3024 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2603 bool smi_operands, 3025 bool smi_operands,
2604 Label* not_numbers, 3026 Label* not_numbers,
(...skipping 21 matching lines...) Expand all
2626 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 3048 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2627 // depending on whether VFP3 is available or not. 3049 // depending on whether VFP3 is available or not.
2628 FloatingPointHelper::Destination destination = 3050 FloatingPointHelper::Destination destination =
2629 Isolate::Current()->cpu_features()->IsSupported(VFP3) && 3051 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
2630 op_ != Token::MOD ? 3052 op_ != Token::MOD ?
2631 FloatingPointHelper::kVFPRegisters : 3053 FloatingPointHelper::kVFPRegisters :
2632 FloatingPointHelper::kCoreRegisters; 3054 FloatingPointHelper::kCoreRegisters;
2633 3055
2634 // Allocate new heap number for result. 3056 // Allocate new heap number for result.
2635 Register result = r5; 3057 Register result = r5;
2636 __ AllocateHeapNumber( 3058 GenerateHeapResultAllocation(
2637 result, scratch1, scratch2, heap_number_map, gc_required); 3059 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2638 3060
2639 // Load the operands. 3061 // Load the operands.
2640 if (smi_operands) { 3062 if (smi_operands) {
2641 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); 3063 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2642 } else { 3064 } else {
2643 FloatingPointHelper::LoadOperands(masm, 3065 FloatingPointHelper::LoadOperands(masm,
2644 destination, 3066 destination,
2645 heap_number_map, 3067 heap_number_map,
2646 scratch1, 3068 scratch1,
2647 scratch2, 3069 scratch2,
(...skipping 21 matching lines...) Expand all
2669 break; 3091 break;
2670 default: 3092 default:
2671 UNREACHABLE(); 3093 UNREACHABLE();
2672 } 3094 }
2673 3095
2674 __ sub(r0, result, Operand(kHeapObjectTag)); 3096 __ sub(r0, result, Operand(kHeapObjectTag));
2675 __ vstr(d5, r0, HeapNumber::kValueOffset); 3097 __ vstr(d5, r0, HeapNumber::kValueOffset);
2676 __ add(r0, r0, Operand(kHeapObjectTag)); 3098 __ add(r0, r0, Operand(kHeapObjectTag));
2677 __ Ret(); 3099 __ Ret();
2678 } else { 3100 } else {
2679 // Using core registers: 3101 // Call the C function to handle the double operation.
2680 // r0: Left value (least significant part of mantissa). 3102 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2681 // r1: Left value (sign, exponent, top of mantissa). 3103 op_,
2682 // r2: Right value (least significant part of mantissa). 3104 result,
2683 // r3: Right value (sign, exponent, top of mantissa). 3105 scratch1);
2684
2685 // Push the current return address before the C call. Return will be
2686 // through pop(pc) below.
2687 __ push(lr);
2688 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2689 // Call C routine that may not cause GC or other trouble. r5 is callee
2690 // save.
2691 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2692 // Store answer in the overwritable heap number.
2693 #if !defined(USE_ARM_EABI)
2694 // Double returned in fp coprocessor register 0 and 1, encoded as
2695 // register cr8. Offsets must be divisible by 4 for coprocessor so we
2696 // need to subtract the tag from r5.
2697 __ sub(scratch1, result, Operand(kHeapObjectTag));
2698 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2699 #else
2700 // Double returned in registers 0 and 1.
2701 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
2702 #endif
2703 // Place result in r0 and return to the pushed return address.
2704 __ mov(r0, Operand(result));
2705 __ pop(pc);
2706 } 3106 }
2707 break; 3107 break;
2708 } 3108 }
2709 case Token::BIT_OR: 3109 case Token::BIT_OR:
2710 case Token::BIT_XOR: 3110 case Token::BIT_XOR:
2711 case Token::BIT_AND: { 3111 case Token::BIT_AND:
3112 case Token::SAR:
3113 case Token::SHR:
3114 case Token::SHL: {
2712 if (smi_operands) { 3115 if (smi_operands) {
2713 __ SmiUntag(r3, left); 3116 __ SmiUntag(r3, left);
2714 __ SmiUntag(r2, right); 3117 __ SmiUntag(r2, right);
2715 } else { 3118 } else {
2716 // Convert operands to 32-bit integers. Right in r2 and left in r3. 3119 // Convert operands to 32-bit integers. Right in r2 and left in r3.
2717 FloatingPointHelper::LoadNumberAsInteger(masm, 3120 FloatingPointHelper::LoadNumberAsInteger(masm,
2718 left, 3121 left,
2719 r3, 3122 r3,
2720 heap_number_map, 3123 heap_number_map,
2721 scratch1, 3124 scratch1,
2722 scratch2, 3125 scratch2,
2723 d0, 3126 d0,
2724 not_numbers); 3127 not_numbers);
2725 FloatingPointHelper::LoadNumberAsInteger(masm, 3128 FloatingPointHelper::LoadNumberAsInteger(masm,
2726 right, 3129 right,
2727 r2, 3130 r2,
2728 heap_number_map, 3131 heap_number_map,
2729 scratch1, 3132 scratch1,
2730 scratch2, 3133 scratch2,
2731 d0, 3134 d0,
2732 not_numbers); 3135 not_numbers);
2733 } 3136 }
3137
3138 Label result_not_a_smi;
2734 switch (op_) { 3139 switch (op_) {
2735 case Token::BIT_OR: 3140 case Token::BIT_OR:
2736 __ orr(r2, r3, Operand(r2)); 3141 __ orr(r2, r3, Operand(r2));
2737 break; 3142 break;
2738 case Token::BIT_XOR: 3143 case Token::BIT_XOR:
2739 __ eor(r2, r3, Operand(r2)); 3144 __ eor(r2, r3, Operand(r2));
2740 break; 3145 break;
2741 case Token::BIT_AND: 3146 case Token::BIT_AND:
2742 __ and_(r2, r3, Operand(r2)); 3147 __ and_(r2, r3, Operand(r2));
2743 break; 3148 break;
3149 case Token::SAR:
3150 // Use only the 5 least significant bits of the shift count.
3151 __ GetLeastBitsFromInt32(r2, r2, 5);
3152 __ mov(r2, Operand(r3, ASR, r2));
3153 break;
3154 case Token::SHR:
3155 // Use only the 5 least significant bits of the shift count.
3156 __ GetLeastBitsFromInt32(r2, r2, 5);
3157 __ mov(r2, Operand(r3, LSR, r2), SetCC);
3158 // SHR is special because it is required to produce a positive answer.
3159 // The code below for writing into heap numbers isn't capable of
3160 // writing the register as an unsigned int so we go to slow case if we
3161 // hit this case.
3162 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3163 __ b(mi, &result_not_a_smi);
3164 } else {
3165 __ b(mi, not_numbers);
3166 }
3167 break;
3168 case Token::SHL:
3169 // Use only the 5 least significant bits of the shift count.
3170 __ GetLeastBitsFromInt32(r2, r2, 5);
3171 __ mov(r2, Operand(r3, LSL, r2));
3172 break;
2744 default: 3173 default:
2745 UNREACHABLE(); 3174 UNREACHABLE();
2746 } 3175 }
2747 3176
2748 Label result_not_a_smi;
2749 // Check that the *signed* result fits in a smi. 3177 // Check that the *signed* result fits in a smi.
2750 __ add(r3, r2, Operand(0x40000000), SetCC); 3178 __ add(r3, r2, Operand(0x40000000), SetCC);
2751 __ b(mi, &result_not_a_smi); 3179 __ b(mi, &result_not_a_smi);
2752 __ SmiTag(r0, r2); 3180 __ SmiTag(r0, r2);
2753 __ Ret(); 3181 __ Ret();
2754 3182
2755 // Allocate new heap number for result. 3183 // Allocate new heap number for result.
2756 __ bind(&result_not_a_smi); 3184 __ bind(&result_not_a_smi);
2757 __ AllocateHeapNumber( 3185 Register result = r5;
2758 r5, scratch1, scratch2, heap_number_map, gc_required); 3186 if (smi_operands) {
3187 __ AllocateHeapNumber(
3188 result, scratch1, scratch2, heap_number_map, gc_required);
3189 } else {
3190 GenerateHeapResultAllocation(
3191 masm, result, heap_number_map, scratch1, scratch2, gc_required);
3192 }
2759 3193
2760 // r2: Answer as signed int32. 3194 // r2: Answer as signed int32.
2761 // r5: Heap number to write answer into. 3195 // r5: Heap number to write answer into.
2762 3196
2763 // Nothing can go wrong now, so move the heap number to r0, which is the 3197 // Nothing can go wrong now, so move the heap number to r0, which is the
2764 // result. 3198 // result.
2765 __ mov(r0, Operand(r5)); 3199 __ mov(r0, Operand(r5));
2766 3200
2767 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 3201 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
2768 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. 3202 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
3203 // mentioned above SHR needs to always produce a positive result.
2769 CpuFeatures::Scope scope(VFP3); 3204 CpuFeatures::Scope scope(VFP3);
2770 __ vmov(s0, r2); 3205 __ vmov(s0, r2);
2771 __ vcvt_f64_s32(d0, s0); 3206 if (op_ == Token::SHR) {
3207 __ vcvt_f64_u32(d0, s0);
3208 } else {
3209 __ vcvt_f64_s32(d0, s0);
3210 }
2772 __ sub(r3, r0, Operand(kHeapObjectTag)); 3211 __ sub(r3, r0, Operand(kHeapObjectTag));
2773 __ vstr(d0, r3, HeapNumber::kValueOffset); 3212 __ vstr(d0, r3, HeapNumber::kValueOffset);
2774 __ Ret(); 3213 __ Ret();
2775 } else { 3214 } else {
2776 // Tail call that writes the int32 in r2 to the heap number in r0, using 3215 // Tail call that writes the int32 in r2 to the heap number in r0, using
2777 // r3 as scratch. r0 is preserved and returned. 3216 // r3 as scratch. r0 is preserved and returned.
2778 WriteInt32ToHeapNumberStub stub(r2, r0, r3); 3217 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2779 __ TailCallStub(&stub); 3218 __ TailCallStub(&stub);
2780 } 3219 }
2781 break; 3220 break;
2782 } 3221 }
2783 default: 3222 default:
2784 UNREACHABLE(); 3223 UNREACHABLE();
2785 } 3224 }
2786 } 3225 }
2787 3226
2788 3227
2789 // Generate the smi code. If the operation on smis are successful this return is 3228 // Generate the smi code. If the operation on smis are successful this return is
2790 // generated. If the result is not a smi and heap number allocation is not 3229 // generated. If the result is not a smi and heap number allocation is not
2791 // requested the code falls through. If number allocation is requested but a 3230 // requested the code falls through. If number allocation is requested but a
2792 // heap number cannot be allocated the code jumps to the lable gc_required. 3231 // heap number cannot be allocated the code jumps to the lable gc_required.
2793 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, 3232 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
2794 Label* gc_required, 3233 Label* gc_required,
2795 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 3234 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2796 Label not_smis; 3235 Label not_smis;
2797 3236
2798 ASSERT(op_ == Token::ADD ||
2799 op_ == Token::SUB ||
2800 op_ == Token::MUL ||
2801 op_ == Token::DIV ||
2802 op_ == Token::MOD ||
2803 op_ == Token::BIT_OR ||
2804 op_ == Token::BIT_AND ||
2805 op_ == Token::BIT_XOR);
2806
2807 Register left = r1; 3237 Register left = r1;
2808 Register right = r0; 3238 Register right = r0;
2809 Register scratch1 = r7; 3239 Register scratch1 = r7;
2810 Register scratch2 = r9; 3240 Register scratch2 = r9;
2811 3241
2812 // Perform combined smi check on both operands. 3242 // Perform combined smi check on both operands.
2813 __ orr(scratch1, left, Operand(right)); 3243 __ orr(scratch1, left, Operand(right));
2814 STATIC_ASSERT(kSmiTag == 0); 3244 STATIC_ASSERT(kSmiTag == 0);
2815 __ tst(scratch1, Operand(kSmiTagMask)); 3245 __ tst(scratch1, Operand(kSmiTagMask));
2816 __ b(ne, &not_smis); 3246 __ b(ne, &not_smis);
2817 3247
2818 // If the smi-smi operation results in a smi return is generated. 3248 // If the smi-smi operation results in a smi return is generated.
2819 GenerateSmiSmiOperation(masm); 3249 GenerateSmiSmiOperation(masm);
2820 3250
2821 // If heap number results are possible generate the result in an allocated 3251 // If heap number results are possible generate the result in an allocated
2822 // heap number. 3252 // heap number.
2823 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { 3253 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2824 GenerateFPOperation(masm, true, NULL, gc_required); 3254 GenerateFPOperation(masm, true, NULL, gc_required);
2825 } 3255 }
2826 __ bind(&not_smis); 3256 __ bind(&not_smis);
2827 } 3257 }
2828 3258
2829 3259
2830 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 3260 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2831 Label not_smis, call_runtime; 3261 Label not_smis, call_runtime;
2832 3262
2833 ASSERT(op_ == Token::ADD ||
2834 op_ == Token::SUB ||
2835 op_ == Token::MUL ||
2836 op_ == Token::DIV ||
2837 op_ == Token::MOD ||
2838 op_ == Token::BIT_OR ||
2839 op_ == Token::BIT_AND ||
2840 op_ == Token::BIT_XOR);
2841
2842 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || 3263 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
2843 result_type_ == TRBinaryOpIC::SMI) { 3264 result_type_ == TRBinaryOpIC::SMI) {
2844 // Only allow smi results. 3265 // Only allow smi results.
2845 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); 3266 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
2846 } else { 3267 } else {
2847 // Allow heap number result and don't make a transition if a heap number 3268 // Allow heap number result and don't make a transition if a heap number
2848 // cannot be allocated. 3269 // cannot be allocated.
2849 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 3270 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2850 } 3271 }
2851 3272
(...skipping 10 matching lines...) Expand all
2862 ASSERT(operands_type_ == TRBinaryOpIC::STRING); 3283 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
2863 ASSERT(op_ == Token::ADD); 3284 ASSERT(op_ == Token::ADD);
2864 // Try to add arguments as strings, otherwise, transition to the generic 3285 // Try to add arguments as strings, otherwise, transition to the generic
2865 // TRBinaryOpIC type. 3286 // TRBinaryOpIC type.
2866 GenerateAddStrings(masm); 3287 GenerateAddStrings(masm);
2867 GenerateTypeTransition(masm); 3288 GenerateTypeTransition(masm);
2868 } 3289 }
2869 3290
2870 3291
2871 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 3292 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2872 ASSERT(op_ == Token::ADD ||
2873 op_ == Token::SUB ||
2874 op_ == Token::MUL ||
2875 op_ == Token::DIV ||
2876 op_ == Token::MOD ||
2877 op_ == Token::BIT_OR ||
2878 op_ == Token::BIT_AND ||
2879 op_ == Token::BIT_XOR);
2880
2881 ASSERT(operands_type_ == TRBinaryOpIC::INT32); 3293 ASSERT(operands_type_ == TRBinaryOpIC::INT32);
2882 3294
2883 GenerateTypeTransition(masm); 3295 Register left = r1;
3296 Register right = r0;
3297 Register scratch1 = r7;
3298 Register scratch2 = r9;
3299 DwVfpRegister double_scratch = d0;
3300 SwVfpRegister single_scratch = s3;
3301
3302 Register heap_number_result = no_reg;
3303 Register heap_number_map = r6;
3304 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3305
3306 Label call_runtime;
3307 // Labels for type transition, used for wrong input or output types.
3308 // Both label are currently actually bound to the same position. We use two
3309 // different label to differentiate the cause leading to type transition.
3310 Label transition;
3311
3312 // Smi-smi fast case.
3313 Label skip;
3314 __ orr(scratch1, left, right);
3315 __ JumpIfNotSmi(scratch1, &skip);
3316 GenerateSmiSmiOperation(masm);
3317 // Fall through if the result is not a smi.
3318 __ bind(&skip);
3319
3320 switch (op_) {
3321 case Token::ADD:
3322 case Token::SUB:
3323 case Token::MUL:
3324 case Token::DIV:
3325 case Token::MOD: {
3326 // Load both operands and check that they are 32-bit integer.
3327 // Jump to type transition if they are not. The registers r0 and r1 (right
3328 // and left) are preserved for the runtime call.
3329 FloatingPointHelper::Destination destination =
3330 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
3331 op_ != Token::MOD ?
3332 FloatingPointHelper::kVFPRegisters :
3333 FloatingPointHelper::kCoreRegisters;
3334
3335 FloatingPointHelper::LoadNumberAsInt32Double(masm,
3336 right,
3337 destination,
3338 d7,
3339 r2,
3340 r3,
3341 heap_number_map,
3342 scratch1,
3343 scratch2,
3344 s0,
3345 &transition);
3346 FloatingPointHelper::LoadNumberAsInt32Double(masm,
3347 left,
3348 destination,
3349 d6,
3350 r4,
3351 r5,
3352 heap_number_map,
3353 scratch1,
3354 scratch2,
3355 s0,
3356 &transition);
3357
3358 if (destination == FloatingPointHelper::kVFPRegisters) {
3359 CpuFeatures::Scope scope(VFP3);
3360 Label return_heap_number;
3361 switch (op_) {
3362 case Token::ADD:
3363 __ vadd(d5, d6, d7);
3364 break;
3365 case Token::SUB:
3366 __ vsub(d5, d6, d7);
3367 break;
3368 case Token::MUL:
3369 __ vmul(d5, d6, d7);
3370 break;
3371 case Token::DIV:
3372 __ vdiv(d5, d6, d7);
3373 break;
3374 default:
3375 UNREACHABLE();
3376 }
3377
3378 if (op_ != Token::DIV) {
3379 // These operations produce an integer result.
3380 // Try to return a smi if we can.
3381 // Otherwise return a heap number if allowed, or jump to type
3382 // transition.
3383
3384 __ EmitVFPTruncate(kRoundToZero,
3385 single_scratch,
3386 d5,
3387 scratch1,
3388 scratch2);
3389
3390 if (result_type_ <= TRBinaryOpIC::INT32) {
3391 // If the ne condition is set, result does
3392 // not fit in a 32-bit integer.
3393 __ b(ne, &transition);
3394 }
3395
3396 // Check if the result fits in a smi.
3397 __ vmov(scratch1, single_scratch);
3398 __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
3399 // If not try to return a heap number.
3400 __ b(mi, &return_heap_number);
3401 // Tag the result and return.
3402 __ SmiTag(r0, scratch1);
3403 __ Ret();
3404 }
3405
3406 if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
3407 : TRBinaryOpIC::INT32) {
3408 __ bind(&return_heap_number);
3409 // We are using vfp registers so r5 is available.
3410 heap_number_result = r5;
3411 GenerateHeapResultAllocation(masm,
3412 heap_number_result,
3413 heap_number_map,
3414 scratch1,
3415 scratch2,
3416 &call_runtime);
3417 __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3418 __ vstr(d5, r0, HeapNumber::kValueOffset);
3419 __ mov(r0, heap_number_result);
3420 __ Ret();
3421 }
3422
3423 // A DIV operation expecting an integer result falls through
3424 // to type transition.
3425
3426 } else {
3427 // We preserved r0 and r1 to be able to call runtime.
3428 // Save the left value on the stack.
3429 __ Push(r5, r4);
3430
3431 // Allocate a heap number to store the result.
3432 heap_number_result = r5;
3433 GenerateHeapResultAllocation(masm,
3434 heap_number_result,
3435 heap_number_map,
3436 scratch1,
3437 scratch2,
3438 &call_runtime);
3439
3440 // Load the left value from the value saved on the stack.
3441 __ Pop(r1, r0);
3442
3443 // Call the C function to handle the double operation.
3444 FloatingPointHelper::CallCCodeForDoubleOperation(
3445 masm, op_, heap_number_result, scratch1);
3446 }
3447
3448 break;
3449 }
3450
3451 case Token::BIT_OR:
3452 case Token::BIT_XOR:
3453 case Token::BIT_AND:
3454 case Token::SAR:
3455 case Token::SHR:
3456 case Token::SHL: {
3457 Label return_heap_number;
3458 Register scratch3 = r5;
3459 // Convert operands to 32-bit integers. Right in r2 and left in r3. The
3460 // registers r0 and r1 (right and left) are preserved for the runtime
3461 // call.
3462 FloatingPointHelper::LoadNumberAsInt32(masm,
3463 left,
3464 r3,
3465 heap_number_map,
3466 scratch1,
3467 scratch2,
3468 scratch3,
3469 d0,
3470 &transition);
3471 FloatingPointHelper::LoadNumberAsInt32(masm,
3472 right,
3473 r2,
3474 heap_number_map,
3475 scratch1,
3476 scratch2,
3477 scratch3,
3478 d0,
3479 &transition);
3480
3481 // The ECMA-262 standard specifies that, for shift operations, only the
3482 // 5 least significant bits of the shift value should be used.
3483 switch (op_) {
3484 case Token::BIT_OR:
3485 __ orr(r2, r3, Operand(r2));
3486 break;
3487 case Token::BIT_XOR:
3488 __ eor(r2, r3, Operand(r2));
3489 break;
3490 case Token::BIT_AND:
3491 __ and_(r2, r3, Operand(r2));
3492 break;
3493 case Token::SAR:
3494 __ and_(r2, r2, Operand(0x1f));
3495 __ mov(r2, Operand(r3, ASR, r2));
3496 break;
3497 case Token::SHR:
3498 __ and_(r2, r2, Operand(0x1f));
3499 __ mov(r2, Operand(r3, LSR, r2), SetCC);
3500 // SHR is special because it is required to produce a positive answer.
3501 // We only get a negative result if the shift value (r2) is 0.
3502 // This result cannot be respresented as a signed 32-bit integer, try
3503 // to return a heap number if we can.
3504 // The non vfp3 code does not support this special case, so jump to
3505 // runtime if we don't support it.
3506 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3507 __ b(mi,
3508 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3509 : &return_heap_number);
3510 } else {
3511 __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3512 : &call_runtime);
3513 }
3514 break;
3515 case Token::SHL:
3516 __ and_(r2, r2, Operand(0x1f));
3517 __ mov(r2, Operand(r3, LSL, r2));
3518 break;
3519 default:
3520 UNREACHABLE();
3521 }
3522
3523 // Check if the result fits in a smi.
3524 __ add(scratch1, r2, Operand(0x40000000), SetCC);
3525 // If not try to return a heap number. (We know the result is an int32.)
3526 __ b(mi, &return_heap_number);
3527 // Tag the result and return.
3528 __ SmiTag(r0, r2);
3529 __ Ret();
3530
3531 __ bind(&return_heap_number);
3532 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3533 CpuFeatures::Scope scope(VFP3);
3534 heap_number_result = r5;
3535 GenerateHeapResultAllocation(masm,
3536 heap_number_result,
3537 heap_number_map,
3538 scratch1,
3539 scratch2,
3540 &call_runtime);
3541
3542 if (op_ != Token::SHR) {
3543 // Convert the result to a floating point value.
3544 __ vmov(double_scratch.low(), r2);
3545 __ vcvt_f64_s32(double_scratch, double_scratch.low());
3546 } else {
3547 // The result must be interpreted as an unsigned 32-bit integer.
3548 __ vmov(double_scratch.low(), r2);
3549 __ vcvt_f64_u32(double_scratch, double_scratch.low());
3550 }
3551
3552 // Store the result.
3553 __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3554 __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3555 __ mov(r0, heap_number_result);
3556 __ Ret();
3557 } else {
3558 // Tail call that writes the int32 in r2 to the heap number in r0, using
3559 // r3 as scratch. r0 is preserved and returned.
3560 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3561 __ TailCallStub(&stub);
3562 }
3563
3564 break;
3565 }
3566
3567 default:
3568 UNREACHABLE();
3569 }
3570
3571 if (transition.is_linked()) {
3572 __ bind(&transition);
3573 GenerateTypeTransition(masm);
3574 }
3575
3576 __ bind(&call_runtime);
3577 GenerateCallRuntime(masm);
2884 } 3578 }
2885 3579
2886 3580
2887 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { 3581 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2888 ASSERT(op_ == Token::ADD ||
2889 op_ == Token::SUB ||
2890 op_ == Token::MUL ||
2891 op_ == Token::DIV ||
2892 op_ == Token::MOD ||
2893 op_ == Token::BIT_OR ||
2894 op_ == Token::BIT_AND ||
2895 op_ == Token::BIT_XOR);
2896
2897 Label not_numbers, call_runtime; 3582 Label not_numbers, call_runtime;
2898 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); 3583 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
2899 3584
2900 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); 3585 GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
2901 3586
2902 __ bind(&not_numbers); 3587 __ bind(&not_numbers);
2903 GenerateTypeTransition(masm); 3588 GenerateTypeTransition(masm);
2904 3589
2905 __ bind(&call_runtime); 3590 __ bind(&call_runtime);
2906 GenerateCallRuntime(masm); 3591 GenerateCallRuntime(masm);
2907 } 3592 }
2908 3593
2909 3594
2910 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 3595 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2911 ASSERT(op_ == Token::ADD || 3596 Label call_runtime, call_string_add_or_runtime;
2912 op_ == Token::SUB ||
2913 op_ == Token::MUL ||
2914 op_ == Token::DIV ||
2915 op_ == Token::MOD ||
2916 op_ == Token::BIT_OR ||
2917 op_ == Token::BIT_AND ||
2918 op_ == Token::BIT_XOR);
2919
2920 Label call_runtime;
2921 3597
2922 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 3598 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2923 3599
2924 // If all else fails, use the runtime system to get the correct 3600 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
2925 // result.
2926 __ bind(&call_runtime);
2927 3601
2928 // Try to add strings before calling runtime. 3602 __ bind(&call_string_add_or_runtime);
2929 if (op_ == Token::ADD) { 3603 if (op_ == Token::ADD) {
2930 GenerateAddStrings(masm); 3604 GenerateAddStrings(masm);
2931 } 3605 }
2932 3606
2933 GenericBinaryOpStub stub(op_, mode_, r1, r0); 3607 __ bind(&call_runtime);
2934 __ TailCallStub(&stub); 3608 GenerateCallRuntime(masm);
2935 } 3609 }
2936 3610
2937 3611
2938 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { 3612 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2939 ASSERT(op_ == Token::ADD); 3613 ASSERT(op_ == Token::ADD);
3614 Label left_not_string, call_runtime;
2940 3615
2941 Register left = r1; 3616 Register left = r1;
2942 Register right = r0; 3617 Register right = r0;
2943 Label call_runtime;
2944 3618
2945 // Check if first argument is a string. 3619 // Check if left argument is a string.
2946 __ JumpIfSmi(left, &call_runtime); 3620 __ JumpIfSmi(left, &left_not_string);
2947 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); 3621 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2948 __ b(ge, &call_runtime); 3622 __ b(ge, &left_not_string);
2949 3623
2950 // First argument is a a string, test second. 3624 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3625 GenerateRegisterArgsPush(masm);
3626 __ TailCallStub(&string_add_left_stub);
3627
3628 // Left operand is not a string, test right.
3629 __ bind(&left_not_string);
2951 __ JumpIfSmi(right, &call_runtime); 3630 __ JumpIfSmi(right, &call_runtime);
2952 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); 3631 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2953 __ b(ge, &call_runtime); 3632 __ b(ge, &call_runtime);
2954 3633
2955 // First and second argument are strings. 3634 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2956 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2957 GenerateRegisterArgsPush(masm); 3635 GenerateRegisterArgsPush(masm);
2958 __ TailCallStub(&string_add_stub); 3636 __ TailCallStub(&string_add_right_stub);
2959 3637
2960 // At least one argument is not a string. 3638 // At least one argument is not a string.
2961 __ bind(&call_runtime); 3639 __ bind(&call_runtime);
2962 } 3640 }
2963 3641
2964 3642
2965 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { 3643 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
2966 GenerateRegisterArgsPush(masm); 3644 GenerateRegisterArgsPush(masm);
2967 switch (op_) { 3645 switch (op_) {
2968 case Token::ADD: 3646 case Token::ADD:
(...skipping 13 matching lines...) Expand all
2982 break; 3660 break;
2983 case Token::BIT_OR: 3661 case Token::BIT_OR:
2984 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); 3662 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
2985 break; 3663 break;
2986 case Token::BIT_AND: 3664 case Token::BIT_AND:
2987 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); 3665 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
2988 break; 3666 break;
2989 case Token::BIT_XOR: 3667 case Token::BIT_XOR:
2990 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); 3668 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
2991 break; 3669 break;
3670 case Token::SAR:
3671 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
3672 break;
3673 case Token::SHR:
3674 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
3675 break;
3676 case Token::SHL:
3677 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
3678 break;
2992 default: 3679 default:
2993 UNREACHABLE(); 3680 UNREACHABLE();
2994 } 3681 }
2995 } 3682 }
2996 3683
2997 3684
2998 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( 3685 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2999 MacroAssembler* masm, 3686 MacroAssembler* masm,
3000 Register result, 3687 Register result,
3001 Register heap_number_map, 3688 Register heap_number_map,
(...skipping 26 matching lines...) Expand all
3028 } 3715 }
3029 } 3716 }
3030 3717
3031 3718
3032 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { 3719 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3033 __ Push(r1, r0); 3720 __ Push(r1, r0);
3034 } 3721 }
3035 3722
3036 3723
3037 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 3724 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3038 // Argument is a number and is on stack and in r0. 3725 // Untagged case: double input in d2, double result goes
3039 Label runtime_call; 3726 // into d2.
3727 // Tagged case: tagged input on top of stack and in r0,
3728 // tagged result (heap number) goes into r0.
3729
3040 Label input_not_smi; 3730 Label input_not_smi;
3041 Label loaded; 3731 Label loaded;
3732 Label calculate;
3733 Label invalid_cache;
3734 const Register scratch0 = r9;
3735 const Register scratch1 = r7;
3736 const Register cache_entry = r0;
3737 const bool tagged = (argument_type_ == TAGGED);
3042 3738
3043 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 3739 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3044 // Load argument and check if it is a smi. 3740 CpuFeatures::Scope scope(VFP3);
3045 __ JumpIfNotSmi(r0, &input_not_smi); 3741 if (tagged) {
3742 // Argument is a number and is on stack and in r0.
3743 // Load argument and check if it is a smi.
3744 __ JumpIfNotSmi(r0, &input_not_smi);
3046 3745
3047 CpuFeatures::Scope scope(VFP3); 3746 // Input is a smi. Convert to double and load the low and high words
3048 // Input is a smi. Convert to double and load the low and high words 3747 // of the double into r2, r3.
3049 // of the double into r2, r3. 3748 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3050 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); 3749 __ b(&loaded);
3051 __ b(&loaded);
3052 3750
3053 __ bind(&input_not_smi); 3751 __ bind(&input_not_smi);
3054 // Check if input is a HeapNumber. 3752 // Check if input is a HeapNumber.
3055 __ CheckMap(r0, 3753 __ CheckMap(r0,
3056 r1, 3754 r1,
3057 Heap::kHeapNumberMapRootIndex, 3755 Heap::kHeapNumberMapRootIndex,
3058 &runtime_call, 3756 &calculate,
3059 true); 3757 true);
3060 // Input is a HeapNumber. Load it to a double register and store the 3758 // Input is a HeapNumber. Load it to a double register and store the
3061 // low and high words into r2, r3. 3759 // low and high words into r2, r3.
3062 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); 3760 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
3063 3761 __ vmov(r2, r3, d0);
3762 } else {
3763 // Input is untagged double in d2. Output goes to d2.
3764 __ vmov(r2, r3, d2);
3765 }
3064 __ bind(&loaded); 3766 __ bind(&loaded);
3065 // r2 = low 32 bits of double value 3767 // r2 = low 32 bits of double value
3066 // r3 = high 32 bits of double value 3768 // r3 = high 32 bits of double value
3067 // Compute hash (the shifts are arithmetic): 3769 // Compute hash (the shifts are arithmetic):
3068 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); 3770 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3069 __ eor(r1, r2, Operand(r3)); 3771 __ eor(r1, r2, Operand(r3));
3070 __ eor(r1, r1, Operand(r1, ASR, 16)); 3772 __ eor(r1, r1, Operand(r1, ASR, 16));
3071 __ eor(r1, r1, Operand(r1, ASR, 8)); 3773 __ eor(r1, r1, Operand(r1, ASR, 8));
3072 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); 3774 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3073 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); 3775 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3074 3776
3075 // r2 = low 32 bits of double value. 3777 // r2 = low 32 bits of double value.
3076 // r3 = high 32 bits of double value. 3778 // r3 = high 32 bits of double value.
3077 // r1 = TranscendentalCache::hash(double value). 3779 // r1 = TranscendentalCache::hash(double value).
3078 __ mov(r0, 3780 __ mov(cache_entry,
3079 Operand(ExternalReference::transcendental_cache_array_address())); 3781 Operand(ExternalReference::transcendental_cache_array_address()));
3080 // r0 points to cache array. 3782 // r0 points to cache array.
3081 __ ldr(r0, MemOperand(r0, type_ * sizeof( 3783 __ ldr(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3082 Isolate::Current()->transcendental_cache()->caches_[0]))); 3784 Isolate::Current()->transcendental_cache()->caches_[0])));
3083 // r0 points to the cache for the type type_. 3785 // r0 points to the cache for the type type_.
3084 // If NULL, the cache hasn't been initialized yet, so go through runtime. 3786 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3085 __ cmp(r0, Operand(0, RelocInfo::NONE)); 3787 __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3086 __ b(eq, &runtime_call); 3788 __ b(eq, &invalid_cache);
3087 3789
3088 #ifdef DEBUG 3790 #ifdef DEBUG
3089 // Check that the layout of cache elements match expectations. 3791 // Check that the layout of cache elements match expectations.
3090 { TranscendentalCache::SubCache::Element test_elem[2]; 3792 { TranscendentalCache::SubCache::Element test_elem[2];
3091 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); 3793 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3092 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); 3794 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3093 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); 3795 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3094 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); 3796 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3095 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); 3797 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3096 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. 3798 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3097 CHECK_EQ(0, elem_in0 - elem_start); 3799 CHECK_EQ(0, elem_in0 - elem_start);
3098 CHECK_EQ(kIntSize, elem_in1 - elem_start); 3800 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3099 CHECK_EQ(2 * kIntSize, elem_out - elem_start); 3801 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3100 } 3802 }
3101 #endif 3803 #endif
3102 3804
3103 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. 3805 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
3104 __ add(r1, r1, Operand(r1, LSL, 1)); 3806 __ add(r1, r1, Operand(r1, LSL, 1));
3105 __ add(r0, r0, Operand(r1, LSL, 2)); 3807 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3106 // Check if cache matches: Double value is stored in uint32_t[2] array. 3808 // Check if cache matches: Double value is stored in uint32_t[2] array.
3107 __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); 3809 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3108 __ cmp(r2, r4); 3810 __ cmp(r2, r4);
3109 __ b(ne, &runtime_call); 3811 __ b(ne, &calculate);
3110 __ cmp(r3, r5); 3812 __ cmp(r3, r5);
3111 __ b(ne, &runtime_call); 3813 __ b(ne, &calculate);
3112 // Cache hit. Load result, pop argument and return. 3814 // Cache hit. Load result, cleanup and return.
3113 __ mov(r0, Operand(r6)); 3815 if (tagged) {
3114 __ pop(); 3816 // Pop input value from stack and load result into r0.
3817 __ pop();
3818 __ mov(r0, Operand(r6));
3819 } else {
3820 // Load result into d2.
3821 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3822 }
3823 __ Ret();
3824 } // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
3825
3826 __ bind(&calculate);
3827 if (tagged) {
3828 __ bind(&invalid_cache);
3829 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
3830 } else {
3831 if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
3832 CpuFeatures::Scope scope(VFP3);
3833
3834 Label no_update;
3835 Label skip_cache;
3836 const Register heap_number_map = r5;
3837
3838 // Call C function to calculate the result and update the cache.
3839 // Register r0 holds precalculated cache entry address; preserve
3840 // it on the stack and pop it into register cache_entry after the
3841 // call.
3842 __ push(cache_entry);
3843 GenerateCallCFunction(masm, scratch0);
3844 __ GetCFunctionDoubleResult(d2);
3845
3846 // Try to update the cache. If we cannot allocate a
3847 // heap number, we return the result without updating.
3848 __ pop(cache_entry);
3849 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3850 __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3851 __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3852 __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3853 __ Ret();
3854
3855 __ bind(&invalid_cache);
3856 // The cache is invalid. Call runtime which will recreate the
3857 // cache.
3858 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3859 __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3860 __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3861 __ EnterInternalFrame();
3862 __ push(r0);
3863 __ CallRuntime(RuntimeFunction(), 1);
3864 __ LeaveInternalFrame();
3865 __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3866 __ Ret();
3867
3868 __ bind(&skip_cache);
3869 // Call C function to calculate the result and answer directly
3870 // without updating the cache.
3871 GenerateCallCFunction(masm, scratch0);
3872 __ GetCFunctionDoubleResult(d2);
3873 __ bind(&no_update);
3874
3875 // We return the value in d2 without adding it to the cache, but
3876 // we cause a scavenging GC so that future allocations will succeed.
3877 __ EnterInternalFrame();
3878
3879 // Allocate an aligned object larger than a HeapNumber.
3880 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3881 __ mov(scratch0, Operand(4 * kPointerSize));
3882 __ push(scratch0);
3883 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3884 __ LeaveInternalFrame();
3115 __ Ret(); 3885 __ Ret();
3116 } 3886 }
3117
3118 __ bind(&runtime_call);
3119 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
3120 } 3887 }
3121 3888
3122 3889
3890 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3891 Register scratch) {
3892 __ push(lr);
3893 __ PrepareCallCFunction(2, scratch);
3894 __ vmov(r0, r1, d2);
3895 switch (type_) {
3896 case TranscendentalCache::SIN:
3897 __ CallCFunction(ExternalReference::math_sin_double_function(), 2);
3898 break;
3899 case TranscendentalCache::COS:
3900 __ CallCFunction(ExternalReference::math_cos_double_function(), 2);
3901 break;
3902 case TranscendentalCache::LOG:
3903 __ CallCFunction(ExternalReference::math_log_double_function(), 2);
3904 break;
3905 default:
3906 UNIMPLEMENTED();
3907 break;
3908 }
3909 __ pop(lr);
3910 }
3911
3912
3123 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 3913 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3124 switch (type_) { 3914 switch (type_) {
3125 // Add more cases when necessary. 3915 // Add more cases when necessary.
3126 case TranscendentalCache::SIN: return Runtime::kMath_sin; 3916 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3127 case TranscendentalCache::COS: return Runtime::kMath_cos; 3917 case TranscendentalCache::COS: return Runtime::kMath_cos;
3128 case TranscendentalCache::LOG: return Runtime::kMath_log; 3918 case TranscendentalCache::LOG: return Runtime::kMath_log;
3129 default: 3919 default:
3130 UNIMPLEMENTED(); 3920 UNIMPLEMENTED();
3131 return Runtime::kAbort; 3921 return Runtime::kAbort;
3132 } 3922 }
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after
3267 case Token::BIT_NOT: 4057 case Token::BIT_NOT:
3268 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); 4058 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
3269 break; 4059 break;
3270 default: 4060 default:
3271 UNREACHABLE(); 4061 UNREACHABLE();
3272 } 4062 }
3273 } 4063 }
3274 4064
3275 4065
3276 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { 4066 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3277 // r0 holds the exception. 4067 __ Throw(r0);
3278
3279 // Adjust this code if not the case.
3280 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
3281
3282 // Drop the sp to the top of the handler.
3283 __ mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
3284 __ ldr(sp, MemOperand(r3));
3285
3286 // Restore the next handler and frame pointer, discard handler state.
3287 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3288 __ pop(r2);
3289 __ str(r2, MemOperand(r3));
3290 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
3291 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
3292
3293 // Before returning we restore the context from the frame pointer if
3294 // not NULL. The frame pointer is NULL in the exception handler of a
3295 // JS entry frame.
3296 __ cmp(fp, Operand(0, RelocInfo::NONE));
3297 // Set cp to NULL if fp is NULL.
3298 __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3299 // Restore cp otherwise.
3300 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
3301 #ifdef DEBUG
3302 if (FLAG_debug_code) {
3303 __ mov(lr, Operand(pc));
3304 }
3305 #endif
3306 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
3307 __ pop(pc);
3308 } 4068 }
3309 4069
3310 4070
3311 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, 4071 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3312 UncatchableExceptionType type) { 4072 UncatchableExceptionType type) {
3313 // Adjust this code if not the case. 4073 __ ThrowUncatchable(type, r0);
3314 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
3315
3316 // Drop sp to the top stack handler.
3317 __ mov(r3, Operand(ExternalReference(Isolate::k_handler_address)));
3318 __ ldr(sp, MemOperand(r3));
3319
3320 // Unwind the handlers until the ENTRY handler is found.
3321 Label loop, done;
3322 __ bind(&loop);
3323 // Load the type of the current stack handler.
3324 const int kStateOffset = StackHandlerConstants::kStateOffset;
3325 __ ldr(r2, MemOperand(sp, kStateOffset));
3326 __ cmp(r2, Operand(StackHandler::ENTRY));
3327 __ b(eq, &done);
3328 // Fetch the next handler in the list.
3329 const int kNextOffset = StackHandlerConstants::kNextOffset;
3330 __ ldr(sp, MemOperand(sp, kNextOffset));
3331 __ jmp(&loop);
3332 __ bind(&done);
3333
3334 // Set the top handler address to next handler past the current ENTRY handler.
3335 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3336 __ pop(r2);
3337 __ str(r2, MemOperand(r3));
3338
3339 if (type == OUT_OF_MEMORY) {
3340 // Set external caught exception to false.
3341 ExternalReference external_caught(
3342 Isolate::k_external_caught_exception_address);
3343 __ mov(r0, Operand(false, RelocInfo::NONE));
3344 __ mov(r2, Operand(external_caught));
3345 __ str(r0, MemOperand(r2));
3346
3347 // Set pending exception and r0 to out of memory exception.
3348 Failure* out_of_memory = Failure::OutOfMemoryException();
3349 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3350 __ mov(r2, Operand(ExternalReference(
3351 Isolate::k_pending_exception_address)));
3352 __ str(r0, MemOperand(r2));
3353 }
3354
3355 // Stack layout at this point. See also StackHandlerConstants.
3356 // sp -> state (ENTRY)
3357 // fp
3358 // lr
3359
3360 // Discard handler state (r2 is not used) and restore frame pointer.
3361 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
3362 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
3363 // Before returning we restore the context from the frame pointer if
3364 // not NULL. The frame pointer is NULL in the exception handler of a
3365 // JS entry frame.
3366 __ cmp(fp, Operand(0, RelocInfo::NONE));
3367 // Set cp to NULL if fp is NULL.
3368 __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3369 // Restore cp otherwise.
3370 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
3371 #ifdef DEBUG
3372 if (FLAG_debug_code) {
3373 __ mov(lr, Operand(pc));
3374 }
3375 #endif
3376 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
3377 __ pop(pc);
3378 } 4074 }
3379 4075
3380 4076
3381 void CEntryStub::GenerateCore(MacroAssembler* masm, 4077 void CEntryStub::GenerateCore(MacroAssembler* masm,
3382 Label* throw_normal_exception, 4078 Label* throw_normal_exception,
3383 Label* throw_termination_exception, 4079 Label* throw_termination_exception,
3384 Label* throw_out_of_memory_exception, 4080 Label* throw_out_of_memory_exception,
3385 bool do_gc, 4081 bool do_gc,
3386 bool always_allocate) { 4082 bool always_allocate) {
3387 // r0: result parameter for PerformGC, if any 4083 // r0: result parameter for PerformGC, if any
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
3457 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 4153 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3458 // Lower 2 bits of r2 are 0 iff r0 has failure tag. 4154 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
3459 __ add(r2, r0, Operand(1)); 4155 __ add(r2, r0, Operand(1));
3460 __ tst(r2, Operand(kFailureTagMask)); 4156 __ tst(r2, Operand(kFailureTagMask));
3461 __ b(eq, &failure_returned); 4157 __ b(eq, &failure_returned);
3462 4158
3463 // Exit C frame and return. 4159 // Exit C frame and return.
3464 // r0:r1: result 4160 // r0:r1: result
3465 // sp: stack pointer 4161 // sp: stack pointer
3466 // fp: frame pointer 4162 // fp: frame pointer
3467 __ LeaveExitFrame(save_doubles_); 4163 // Callee-saved register r4 still holds argc.
4164 __ LeaveExitFrame(save_doubles_, r4);
4165 __ mov(pc, lr);
3468 4166
3469 // check if we should retry or throw exception 4167 // check if we should retry or throw exception
3470 Label retry; 4168 Label retry;
3471 __ bind(&failure_returned); 4169 __ bind(&failure_returned);
3472 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 4170 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3473 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); 4171 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3474 __ b(eq, &retry); 4172 __ b(eq, &retry);
3475 4173
3476 // Special handling of out of memory exceptions. 4174 // Special handling of out of memory exceptions.
3477 Failure* out_of_memory = Failure::OutOfMemoryException(); 4175 Failure* out_of_memory = Failure::OutOfMemoryException();
(...skipping 291 matching lines...) Expand 10 before | Expand all | Expand 10 after
3769 // map and function. The cached answer will be set when it is known below. 4467 // map and function. The cached answer will be set when it is known below.
3770 if (!HasCallSiteInlineCheck()) { 4468 if (!HasCallSiteInlineCheck()) {
3771 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); 4469 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3772 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); 4470 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3773 } else { 4471 } else {
3774 ASSERT(HasArgsInRegisters()); 4472 ASSERT(HasArgsInRegisters());
3775 // Patch the (relocated) inlined map check. 4473 // Patch the (relocated) inlined map check.
3776 4474
3777 // The offset was stored in r4 safepoint slot. 4475 // The offset was stored in r4 safepoint slot.
3778 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) 4476 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
3779 __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); 4477 __ LoadFromSafepointRegisterSlot(scratch, r4);
3780 __ sub(inline_site, lr, scratch); 4478 __ sub(inline_site, lr, scratch);
3781 // Get the map location in scratch and patch it. 4479 // Get the map location in scratch and patch it.
3782 __ GetRelocatedValueLocation(inline_site, scratch); 4480 __ GetRelocatedValueLocation(inline_site, scratch);
3783 __ str(map, MemOperand(scratch)); 4481 __ str(map, MemOperand(scratch));
3784 } 4482 }
3785 4483
3786 // Register mapping: r3 is object map and r4 is function prototype. 4484 // Register mapping: r3 is object map and r4 is function prototype.
3787 // Get prototype of object into r2. 4485 // Get prototype of object into r2.
3788 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); 4486 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3789 4487
(...skipping 445 matching lines...) Expand 10 before | Expand all | Expand 10 after
4235 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); 4933 __ mov(r1, Operand(r1, ASR, kSmiTagSize));
4236 4934
4237 // r1: previous index 4935 // r1: previous index
4238 // r3: encoding of subject string (1 if ascii, 0 if two_byte); 4936 // r3: encoding of subject string (1 if ascii, 0 if two_byte);
4239 // r7: code 4937 // r7: code
4240 // subject: Subject string 4938 // subject: Subject string
4241 // regexp_data: RegExp data (FixedArray) 4939 // regexp_data: RegExp data (FixedArray)
4242 // All checks done. Now push arguments for native regexp code. 4940 // All checks done. Now push arguments for native regexp code.
4243 __ IncrementCounter(COUNTERS->regexp_entry_native(), 1, r0, r2); 4941 __ IncrementCounter(COUNTERS->regexp_entry_native(), 1, r0, r2);
4244 4942
4245 static const int kRegExpExecuteArguments = 7; 4943 // Isolates: note we add an additional parameter here (isolate pointer).
4246 __ push(lr); 4944 static const int kRegExpExecuteArguments = 8;
4247 __ PrepareCallCFunction(kRegExpExecuteArguments, r0); 4945 static const int kParameterRegisters = 4;
4946 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4248 4947
4249 // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. 4948 // Stack pointer now points to cell where return address is to be written.
4949 // Arguments are before that on the stack or in registers.
4950
4951 // Argument 8 (sp[16]): Pass current isolate address.
4952 __ mov(r0, Operand(ExternalReference::isolate_address()));
4953 __ str(r0, MemOperand(sp, 4 * kPointerSize));
4954
4955 // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
4250 __ mov(r0, Operand(1)); 4956 __ mov(r0, Operand(1));
4251 __ str(r0, MemOperand(sp, 2 * kPointerSize)); 4957 __ str(r0, MemOperand(sp, 3 * kPointerSize));
4252 4958
4253 // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. 4959 // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
4254 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); 4960 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
4255 __ ldr(r0, MemOperand(r0, 0)); 4961 __ ldr(r0, MemOperand(r0, 0));
4256 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); 4962 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
4257 __ ldr(r2, MemOperand(r2, 0)); 4963 __ ldr(r2, MemOperand(r2, 0));
4258 __ add(r0, r0, Operand(r2)); 4964 __ add(r0, r0, Operand(r2));
4965 __ str(r0, MemOperand(sp, 2 * kPointerSize));
4966
4967 // Argument 5 (sp[4]): static offsets vector buffer.
4968 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
4259 __ str(r0, MemOperand(sp, 1 * kPointerSize)); 4969 __ str(r0, MemOperand(sp, 1 * kPointerSize));
4260 4970
4261 // Argument 5 (sp[0]): static offsets vector buffer.
4262 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
4263 __ str(r0, MemOperand(sp, 0 * kPointerSize));
4264
4265 // For arguments 4 and 3 get string length, calculate start of string data and 4971 // For arguments 4 and 3 get string length, calculate start of string data and
4266 // calculate the shift of the index (0 for ASCII and 1 for two byte). 4972 // calculate the shift of the index (0 for ASCII and 1 for two byte).
4267 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); 4973 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
4268 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); 4974 __ mov(r0, Operand(r0, ASR, kSmiTagSize));
4269 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); 4975 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4270 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); 4976 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4271 __ eor(r3, r3, Operand(1)); 4977 __ eor(r3, r3, Operand(1));
4272 // Argument 4 (r3): End of string data 4978 // Argument 4 (r3): End of string data
4273 // Argument 3 (r2): Start of string data 4979 // Argument 3 (r2): Start of string data
4274 __ add(r2, r9, Operand(r1, LSL, r3)); 4980 __ add(r2, r9, Operand(r1, LSL, r3));
4275 __ add(r3, r9, Operand(r0, LSL, r3)); 4981 __ add(r3, r9, Operand(r0, LSL, r3));
4276 4982
4277 // Argument 2 (r1): Previous index. 4983 // Argument 2 (r1): Previous index.
4278 // Already there 4984 // Already there
4279 4985
4280 // Argument 1 (r0): Subject string. 4986 // Argument 1 (r0): Subject string.
4281 __ mov(r0, subject); 4987 __ mov(r0, subject);
4282 4988
4283 // Locate the code entry and call it. 4989 // Locate the code entry and call it.
4284 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); 4990 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
4285 __ CallCFunction(r7, r9, kRegExpExecuteArguments); 4991 DirectCEntryStub stub;
4286 __ pop(lr); 4992 stub.GenerateCall(masm, r7);
4993
4994 __ LeaveExitFrame(false, no_reg);
4287 4995
4288 // r0: result 4996 // r0: result
4289 // subject: subject string (callee saved) 4997 // subject: subject string (callee saved)
4290 // regexp_data: RegExp data (callee saved) 4998 // regexp_data: RegExp data (callee saved)
4291 // last_match_info_elements: Last match info elements (callee saved) 4999 // last_match_info_elements: Last match info elements (callee saved)
4292 5000
4293 // Check the result. 5001 // Check the result.
4294 Label success; 5002 Label success;
5003
4295 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); 5004 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4296 __ b(eq, &success); 5005 __ b(eq, &success);
4297 Label failure; 5006 Label failure;
4298 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); 5007 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
4299 __ b(eq, &failure); 5008 __ b(eq, &failure);
4300 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); 5009 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
4301 // If not exception it can only be retry. Handle that in the runtime system. 5010 // If not exception it can only be retry. Handle that in the runtime system.
4302 __ b(ne, &runtime); 5011 __ b(ne, &runtime);
4303 // Result must now be exception. If there is no pending exception already a 5012 // Result must now be exception. If there is no pending exception already a
4304 // stack overflow (on the backtrack stack) was detected in RegExp code but 5013 // stack overflow (on the backtrack stack) was detected in RegExp code but
4305 // haven't created the exception yet. Handle that in the runtime system. 5014 // haven't created the exception yet. Handle that in the runtime system.
4306 // TODO(592): Rerunning the RegExp to get the stack overflow exception. 5015 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4307 __ mov(r0, Operand(ExternalReference::the_hole_value_location())); 5016 __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
4308 __ ldr(r0, MemOperand(r0, 0));
4309 __ mov(r1, Operand(ExternalReference(Isolate::k_pending_exception_address)));
4310 __ ldr(r1, MemOperand(r1, 0)); 5017 __ ldr(r1, MemOperand(r1, 0));
5018 __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address)));
5019 __ ldr(r0, MemOperand(r2, 0));
4311 __ cmp(r0, r1); 5020 __ cmp(r0, r1);
4312 __ b(eq, &runtime); 5021 __ b(eq, &runtime);
5022
5023 __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
5024
5025 // Check if the exception is a termination. If so, throw as uncatchable.
5026 __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
5027 __ cmp(r0, ip);
5028 Label termination_exception;
5029 __ b(eq, &termination_exception);
5030
5031 __ Throw(r0); // Expects thrown value in r0.
5032
5033 __ bind(&termination_exception);
5034 __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
5035
4313 __ bind(&failure); 5036 __ bind(&failure);
4314 // For failure and exception return null. 5037 // For failure and exception return null.
4315 __ mov(r0, Operand(FACTORY->null_value())); 5038 __ mov(r0, Operand(FACTORY->null_value()));
4316 __ add(sp, sp, Operand(4 * kPointerSize)); 5039 __ add(sp, sp, Operand(4 * kPointerSize));
4317 __ Ret(); 5040 __ Ret();
4318 5041
4319 // Process the result from the native regexp code. 5042 // Process the result from the native regexp code.
4320 __ bind(&success); 5043 __ bind(&success);
4321 __ ldr(r1, 5044 __ ldr(r1,
4322 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); 5045 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
(...skipping 1160 matching lines...) Expand 10 before | Expand all | Expand 10 after
5483 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); 6206 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
5484 6207
5485 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) 6208 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
5486 // tagged as a small integer. 6209 // tagged as a small integer.
5487 __ bind(&runtime); 6210 __ bind(&runtime);
5488 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); 6211 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5489 } 6212 }
5490 6213
5491 6214
5492 void StringAddStub::Generate(MacroAssembler* masm) { 6215 void StringAddStub::Generate(MacroAssembler* masm) {
5493 Label string_add_runtime; 6216 Label string_add_runtime, call_builtin;
6217 Builtins::JavaScript builtin_id = Builtins::ADD;
6218
5494 // Stack on entry: 6219 // Stack on entry:
5495 // sp[0]: second argument. 6220 // sp[0]: second argument (right).
5496 // sp[4]: first argument. 6221 // sp[4]: first argument (left).
5497 6222
5498 // Load the two arguments. 6223 // Load the two arguments.
5499 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. 6224 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
5500 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. 6225 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
5501 6226
5502 // Make sure that both arguments are strings if not known in advance. 6227 // Make sure that both arguments are strings if not known in advance.
5503 if (string_check_) { 6228 if (flags_ == NO_STRING_ADD_FLAGS) {
5504 STATIC_ASSERT(kSmiTag == 0);
5505 __ JumpIfEitherSmi(r0, r1, &string_add_runtime); 6229 __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
5506 // Load instance types. 6230 // Load instance types.
5507 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 6231 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5508 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 6232 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5509 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 6233 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5510 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 6234 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5511 STATIC_ASSERT(kStringTag == 0); 6235 STATIC_ASSERT(kStringTag == 0);
5512 // If either is not a string, go to runtime. 6236 // If either is not a string, go to runtime.
5513 __ tst(r4, Operand(kIsNotStringMask)); 6237 __ tst(r4, Operand(kIsNotStringMask));
5514 __ tst(r5, Operand(kIsNotStringMask), eq); 6238 __ tst(r5, Operand(kIsNotStringMask), eq);
5515 __ b(ne, &string_add_runtime); 6239 __ b(ne, &string_add_runtime);
6240 } else {
6241 // Here at least one of the arguments is definitely a string.
6242 // We convert the one that is not known to be a string.
6243 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6244 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6245 GenerateConvertArgument(
6246 masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
6247 builtin_id = Builtins::STRING_ADD_RIGHT;
6248 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6249 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6250 GenerateConvertArgument(
6251 masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
6252 builtin_id = Builtins::STRING_ADD_LEFT;
6253 }
5516 } 6254 }
5517 6255
5518 // Both arguments are strings. 6256 // Both arguments are strings.
5519 // r0: first string 6257 // r0: first string
5520 // r1: second string 6258 // r1: second string
5521 // r4: first string instance type (if string_check_) 6259 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5522 // r5: second string instance type (if string_check_) 6260 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5523 { 6261 {
5524 Label strings_not_empty; 6262 Label strings_not_empty;
5525 // Check if either of the strings are empty. In that case return the other. 6263 // Check if either of the strings are empty. In that case return the other.
5526 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); 6264 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
5527 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); 6265 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
5528 STATIC_ASSERT(kSmiTag == 0); 6266 STATIC_ASSERT(kSmiTag == 0);
5529 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. 6267 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
5530 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. 6268 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
5531 STATIC_ASSERT(kSmiTag == 0); 6269 STATIC_ASSERT(kSmiTag == 0);
5532 // Else test if second string is empty. 6270 // Else test if second string is empty.
5533 __ cmp(r3, Operand(Smi::FromInt(0)), ne); 6271 __ cmp(r3, Operand(Smi::FromInt(0)), ne);
5534 __ b(ne, &strings_not_empty); // If either string was empty, return r0. 6272 __ b(ne, &strings_not_empty); // If either string was empty, return r0.
5535 6273
5536 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); 6274 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
5537 __ add(sp, sp, Operand(2 * kPointerSize)); 6275 __ add(sp, sp, Operand(2 * kPointerSize));
5538 __ Ret(); 6276 __ Ret();
5539 6277
5540 __ bind(&strings_not_empty); 6278 __ bind(&strings_not_empty);
5541 } 6279 }
5542 6280
5543 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); 6281 __ mov(r2, Operand(r2, ASR, kSmiTagSize));
5544 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); 6282 __ mov(r3, Operand(r3, ASR, kSmiTagSize));
5545 // Both strings are non-empty. 6283 // Both strings are non-empty.
5546 // r0: first string 6284 // r0: first string
5547 // r1: second string 6285 // r1: second string
5548 // r2: length of first string 6286 // r2: length of first string
5549 // r3: length of second string 6287 // r3: length of second string
5550 // r4: first string instance type (if string_check_) 6288 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5551 // r5: second string instance type (if string_check_) 6289 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5552 // Look at the length of the result of adding the two strings. 6290 // Look at the length of the result of adding the two strings.
5553 Label string_add_flat_result, longer_than_two; 6291 Label string_add_flat_result, longer_than_two;
5554 // Adding two lengths can't overflow. 6292 // Adding two lengths can't overflow.
5555 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); 6293 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
5556 __ add(r6, r2, Operand(r3)); 6294 __ add(r6, r2, Operand(r3));
5557 // Use the runtime system when adding two one character strings, as it 6295 // Use the runtime system when adding two one character strings, as it
5558 // contains optimizations for this specific case using the symbol table. 6296 // contains optimizations for this specific case using the symbol table.
5559 __ cmp(r6, Operand(2)); 6297 __ cmp(r6, Operand(2));
5560 __ b(ne, &longer_than_two); 6298 __ b(ne, &longer_than_two);
5561 6299
5562 // Check that both strings are non-external ascii strings. 6300 // Check that both strings are non-external ascii strings.
5563 if (!string_check_) { 6301 if (flags_ != NO_STRING_ADD_FLAGS) {
5564 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 6302 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5565 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 6303 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5566 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 6304 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5567 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 6305 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5568 } 6306 }
5569 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, 6307 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
5570 &string_add_runtime); 6308 &string_add_runtime);
5571 6309
5572 // Get the two characters forming the sub string. 6310 // Get the two characters forming the sub string.
5573 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); 6311 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
(...skipping 27 matching lines...) Expand all
5601 __ b(lt, &string_add_flat_result); 6339 __ b(lt, &string_add_flat_result);
5602 // Handle exceptionally long strings in the runtime system. 6340 // Handle exceptionally long strings in the runtime system.
5603 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); 6341 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
5604 ASSERT(IsPowerOf2(String::kMaxLength + 1)); 6342 ASSERT(IsPowerOf2(String::kMaxLength + 1));
5605 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. 6343 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
5606 __ cmp(r6, Operand(String::kMaxLength + 1)); 6344 __ cmp(r6, Operand(String::kMaxLength + 1));
5607 __ b(hs, &string_add_runtime); 6345 __ b(hs, &string_add_runtime);
5608 6346
5609 // If result is not supposed to be flat, allocate a cons string object. 6347 // If result is not supposed to be flat, allocate a cons string object.
5610 // If both strings are ascii the result is an ascii cons string. 6348 // If both strings are ascii the result is an ascii cons string.
5611 if (!string_check_) { 6349 if (flags_ != NO_STRING_ADD_FLAGS) {
5612 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 6350 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5613 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 6351 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5614 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 6352 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5615 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 6353 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5616 } 6354 }
5617 Label non_ascii, allocated, ascii_data; 6355 Label non_ascii, allocated, ascii_data;
5618 STATIC_ASSERT(kTwoByteStringTag == 0); 6356 STATIC_ASSERT(kTwoByteStringTag == 0);
5619 __ tst(r4, Operand(kStringEncodingMask)); 6357 __ tst(r4, Operand(kStringEncodingMask));
5620 __ tst(r5, Operand(kStringEncodingMask), ne); 6358 __ tst(r5, Operand(kStringEncodingMask), ne);
5621 __ b(eq, &non_ascii); 6359 __ b(eq, &non_ascii);
(...skipping 27 matching lines...) Expand all
5649 // Allocate a two byte cons string. 6387 // Allocate a two byte cons string.
5650 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); 6388 __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
5651 __ jmp(&allocated); 6389 __ jmp(&allocated);
5652 6390
5653 // Handle creating a flat result. First check that both strings are 6391 // Handle creating a flat result. First check that both strings are
5654 // sequential and that they have the same encoding. 6392 // sequential and that they have the same encoding.
5655 // r0: first string 6393 // r0: first string
5656 // r1: second string 6394 // r1: second string
5657 // r2: length of first string 6395 // r2: length of first string
5658 // r3: length of second string 6396 // r3: length of second string
5659 // r4: first string instance type (if string_check_) 6397 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5660 // r5: second string instance type (if string_check_) 6398 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5661 // r6: sum of lengths. 6399 // r6: sum of lengths.
5662 __ bind(&string_add_flat_result); 6400 __ bind(&string_add_flat_result);
5663 if (!string_check_) { 6401 if (flags_ != NO_STRING_ADD_FLAGS) {
5664 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 6402 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5665 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 6403 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5666 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 6404 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5667 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 6405 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5668 } 6406 }
5669 // Check that both strings are sequential. 6407 // Check that both strings are sequential.
5670 STATIC_ASSERT(kSeqStringTag == 0); 6408 STATIC_ASSERT(kSeqStringTag == 0);
5671 __ tst(r4, Operand(kStringRepresentationMask)); 6409 __ tst(r4, Operand(kStringRepresentationMask));
5672 __ tst(r5, Operand(kStringRepresentationMask), eq); 6410 __ tst(r5, Operand(kStringRepresentationMask), eq);
5673 __ b(ne, &string_add_runtime); 6411 __ b(ne, &string_add_runtime);
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
5751 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); 6489 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
5752 6490
5753 __ mov(r0, Operand(r7)); 6491 __ mov(r0, Operand(r7));
5754 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); 6492 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
5755 __ add(sp, sp, Operand(2 * kPointerSize)); 6493 __ add(sp, sp, Operand(2 * kPointerSize));
5756 __ Ret(); 6494 __ Ret();
5757 6495
5758 // Just jump to runtime to add the two strings. 6496 // Just jump to runtime to add the two strings.
5759 __ bind(&string_add_runtime); 6497 __ bind(&string_add_runtime);
5760 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); 6498 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6499
6500 if (call_builtin.is_linked()) {
6501 __ bind(&call_builtin);
6502 __ InvokeBuiltin(builtin_id, JUMP_JS);
6503 }
6504 }
6505
6506
6507 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6508 int stack_offset,
6509 Register arg,
6510 Register scratch1,
6511 Register scratch2,
6512 Register scratch3,
6513 Register scratch4,
6514 Label* slow) {
6515 // First check if the argument is already a string.
6516 Label not_string, done;
6517 __ JumpIfSmi(arg, &not_string);
6518 __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
6519 __ b(lt, &done);
6520
6521 // Check the number to string cache.
6522 Label not_cached;
6523 __ bind(&not_string);
6524 // Puts the cached result into scratch1.
6525 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6526 arg,
6527 scratch1,
6528 scratch2,
6529 scratch3,
6530 scratch4,
6531 false,
6532 &not_cached);
6533 __ mov(arg, scratch1);
6534 __ str(arg, MemOperand(sp, stack_offset));
6535 __ jmp(&done);
6536
6537 // Check if the argument is a safe string wrapper.
6538 __ bind(&not_cached);
6539 __ JumpIfSmi(arg, slow);
6540 __ CompareObjectType(
6541 arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
6542 __ b(ne, slow);
6543 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6544 __ and_(scratch2,
6545 scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6546 __ cmp(scratch2,
6547 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6548 __ b(ne, slow);
6549 __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6550 __ str(arg, MemOperand(sp, stack_offset));
6551
6552 __ bind(&done);
5761 } 6553 }
5762 6554
5763 6555
5764 void StringCharAtStub::Generate(MacroAssembler* masm) { 6556 void StringCharAtStub::Generate(MacroAssembler* masm) {
5765 // Expects two arguments (object, index) on the stack: 6557 // Expects two arguments (object, index) on the stack:
5766 // lr: return address 6558 // lr: return address
5767 // sp[0]: index 6559 // sp[0]: index
5768 // sp[4]: object 6560 // sp[4]: object
5769 Register object = r1; 6561 Register object = r1;
5770 Register index = r0; 6562 Register index = r0;
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
5815 ASSERT(state_ == CompareIC::SMIS); 6607 ASSERT(state_ == CompareIC::SMIS);
5816 Label miss; 6608 Label miss;
5817 __ orr(r2, r1, r0); 6609 __ orr(r2, r1, r0);
5818 __ tst(r2, Operand(kSmiTagMask)); 6610 __ tst(r2, Operand(kSmiTagMask));
5819 __ b(ne, &miss); 6611 __ b(ne, &miss);
5820 6612
5821 if (GetCondition() == eq) { 6613 if (GetCondition() == eq) {
5822 // For equality we do not care about the sign of the result. 6614 // For equality we do not care about the sign of the result.
5823 __ sub(r0, r0, r1, SetCC); 6615 __ sub(r0, r0, r1, SetCC);
5824 } else { 6616 } else {
5825 __ sub(r1, r1, r0, SetCC); 6617 // Untag before subtracting to avoid handling overflow.
5826 // Correct sign of result in case of overflow. 6618 __ SmiUntag(r1);
5827 __ rsb(r1, r1, Operand(0), SetCC, vs); 6619 __ sub(r0, r1, SmiUntagOperand(r0));
5828 __ mov(r0, r1);
5829 } 6620 }
5830 __ Ret(); 6621 __ Ret();
5831 6622
5832 __ bind(&miss); 6623 __ bind(&miss);
5833 GenerateMiss(masm); 6624 GenerateMiss(masm);
5834 } 6625 }
5835 6626
5836 6627
5837 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { 6628 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
5838 ASSERT(state_ == CompareIC::HEAP_NUMBERS); 6629 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after
5926 __ Jump(r2); 6717 __ Jump(r2);
5927 } 6718 }
5928 6719
5929 6720
5930 void DirectCEntryStub::Generate(MacroAssembler* masm) { 6721 void DirectCEntryStub::Generate(MacroAssembler* masm) {
5931 __ ldr(pc, MemOperand(sp, 0)); 6722 __ ldr(pc, MemOperand(sp, 0));
5932 } 6723 }
5933 6724
5934 6725
5935 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, 6726 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
5936 ApiFunction *function) { 6727 ExternalReference function) {
6728 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6729 RelocInfo::CODE_TARGET));
6730 __ mov(r2, Operand(function));
6731 // Push return address (accessible to GC through exit frame pc).
6732 __ str(pc, MemOperand(sp, 0));
6733 __ Jump(r2); // Call the api function.
6734 }
6735
6736
6737 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6738 Register target) {
5937 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), 6739 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
5938 RelocInfo::CODE_TARGET)); 6740 RelocInfo::CODE_TARGET));
5939 // Push return address (accessible to GC through exit frame pc). 6741 // Push return address (accessible to GC through exit frame pc).
5940 __ mov(r2,
5941 Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
5942 __ str(pc, MemOperand(sp, 0)); 6742 __ str(pc, MemOperand(sp, 0));
5943 __ Jump(r2); // Call the api function. 6743 __ Jump(target); // Call the C++ function.
5944 } 6744 }
5945 6745
5946 6746
5947 void GenerateFastPixelArrayLoad(MacroAssembler* masm, 6747 void GenerateFastPixelArrayLoad(MacroAssembler* masm,
5948 Register receiver, 6748 Register receiver,
5949 Register key, 6749 Register key,
5950 Register elements_map, 6750 Register elements_map,
5951 Register elements, 6751 Register elements,
5952 Register scratch1, 6752 Register scratch1,
5953 Register scratch2, 6753 Register scratch2,
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
6004 6804
6005 // Perform the indexed load and tag the result as a smi. 6805 // Perform the indexed load and tag the result as a smi.
6006 __ ldr(scratch1, 6806 __ ldr(scratch1,
6007 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); 6807 FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
6008 __ ldrb(scratch1, MemOperand(scratch1, scratch2)); 6808 __ ldrb(scratch1, MemOperand(scratch1, scratch2));
6009 __ SmiTag(r0, scratch1); 6809 __ SmiTag(r0, scratch1);
6010 __ Ret(); 6810 __ Ret();
6011 } 6811 }
6012 6812
6013 6813
6814 void GenerateFastPixelArrayStore(MacroAssembler* masm,
6815 Register receiver,
6816 Register key,
6817 Register value,
6818 Register elements,
6819 Register elements_map,
6820 Register scratch1,
6821 Register scratch2,
6822 bool load_elements_from_receiver,
6823 bool load_elements_map_from_elements,
6824 Label* key_not_smi,
6825 Label* value_not_smi,
6826 Label* not_pixel_array,
6827 Label* out_of_range) {
6828 // Register use:
6829 // receiver - holds the receiver and is unchanged unless the
6830 // store succeeds.
6831 // key - holds the key (must be a smi) and is unchanged.
6832 // value - holds the value (must be a smi) and is unchanged.
6833 // elements - holds the element object of the receiver on entry if
6834 // load_elements_from_receiver is false, otherwise used
6835 // internally to store the pixel arrays elements and
6836 // external array pointer.
6837 // elements_map - holds the map of the element object if
6838 // load_elements_map_from_elements is false, otherwise
6839 // loaded with the element map.
6840 //
6841 Register external_pointer = elements;
6842 Register untagged_key = scratch1;
6843 Register untagged_value = scratch2;
6844
6845 if (load_elements_from_receiver) {
6846 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
6847 }
6848
6849 // By passing NULL as not_pixel_array, callers signal that they have already
6850 // verified that the receiver has pixel array elements.
6851 if (not_pixel_array != NULL) {
6852 if (load_elements_map_from_elements) {
6853 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
6854 }
6855 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
6856 __ cmp(elements_map, ip);
6857 __ b(ne, not_pixel_array);
6858 } else {
6859 if (FLAG_debug_code) {
6860 // Map check should have already made sure that elements is a pixel array.
6861 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
6862 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
6863 __ cmp(elements_map, ip);
6864 __ Assert(eq, "Elements isn't a pixel array");
6865 }
6866 }
6867
6868 // Some callers already have verified that the key is a smi. key_not_smi is
6869 // set to NULL as a sentinel for that case. Otherwise, add an explicit check
6870 // to ensure the key is a smi must be added.
6871 if (key_not_smi != NULL) {
6872 __ JumpIfNotSmi(key, key_not_smi);
6873 } else {
6874 if (FLAG_debug_code) {
6875 __ AbortIfNotSmi(key);
6876 }
6877 }
6878
6879 __ SmiUntag(untagged_key, key);
6880
6881 // Perform bounds check.
6882 __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
6883 __ cmp(untagged_key, scratch2);
6884 __ b(hs, out_of_range); // unsigned check handles negative keys.
6885
6886 __ JumpIfNotSmi(value, value_not_smi);
6887 __ SmiUntag(untagged_value, value);
6888
6889 // Clamp the value to [0..255].
6890 __ Usat(untagged_value, 8, Operand(untagged_value));
6891 // Get the pointer to the external array. This clobbers elements.
6892 __ ldr(external_pointer,
6893 FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
6894 __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
6895 __ Ret();
6896 }
6897
6898
6014 #undef __ 6899 #undef __
6015 6900
6016 } } // namespace v8::internal 6901 } } // namespace v8::internal
6017 6902
6018 #endif // V8_TARGET_ARCH_ARM 6903 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698