Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 142693005: A64: Synchronize with r16918. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 41 matching lines...)
 void ToNumberStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r0 };
   descriptor->register_param_count_ = 1;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ = NULL;
 }


+void NumberToStringStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { r3, r2, r1 };
   descriptor->register_param_count_ = 3;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
 }

(...skipping 746 matching lines...)
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number. Call
     // the runtime.
     __ b(ne, slow);
   }

   // Lhs is a smi, rhs is a number.
   // Convert lhs to a double in d7.
   __ SmiToDouble(d7, lhs);
   // Load the double from rhs, tagged HeapNumber r0, to d6.
-  __ sub(r7, rhs, Operand(kHeapObjectTag));
-  __ vldr(d6, r7, HeapNumber::kValueOffset);
+  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);

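Note: this hunk's pattern repeats throughout the file: the untag-then-load pair is collapsed into one VFP load whose immediate offset compensates for the heap-object tag, freeing the scratch register r7. A minimal sketch of the equivalence (registers as in this hunk; kHeapObjectTag is 1):

    // Before: untag the pointer into a scratch register, then load.
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
    // After: fold the tag adjustment into the load offset; r7 stays free.
    __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
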
   // We now have both loaded as doubles but we can skip the lhs nan check
   // since it's a smi.
   __ jmp(lhs_not_nan);

   __ bind(&rhs_is_smi);
   // Rhs is a smi. Check whether the non-smi lhs is a heap number.
   __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
   if (strict) {
     // If lhs is not a number and rhs is a smi then strict equality cannot
     // succeed. Return non-equal.
     // If lhs is r0 then there is already a non zero value in it.
     if (!lhs.is(r0)) {
       __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
     }
     __ Ret(ne);
   } else {
     // Smi compared non-strictly with a non-smi non-heap-number. Call
     // the runtime.
     __ b(ne, slow);
   }

   // Rhs is a smi, lhs is a heap number.
   // Load the double from lhs, tagged HeapNumber r1, to d7.
-  __ sub(r7, lhs, Operand(kHeapObjectTag));
-  __ vldr(d7, r7, HeapNumber::kValueOffset);
+  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
   // Convert rhs to a double in d6 .
   __ SmiToDouble(d6, rhs);
   // Fall through to both_loaded_as_doubles.
 }


 // See comment at call site.
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs) {
(...skipping 47 matching lines...)
          (lhs.is(r1) && rhs.is(r0)));

   __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
   __ b(ne, not_heap_numbers);
   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
   __ cmp(r2, r3);
   __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

   // Both are heap numbers. Load them up then jump to the code we have
   // for that.
-  __ sub(r7, rhs, Operand(kHeapObjectTag));
-  __ vldr(d6, r7, HeapNumber::kValueOffset);
-  __ sub(r7, lhs, Operand(kHeapObjectTag));
-  __ vldr(d7, r7, HeapNumber::kValueOffset);
+  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
   __ jmp(both_loaded_as_doubles);
 }


 // Fast negative check for internalized-to-internalized equality.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                      Register lhs,
                                                      Register rhs,
                                                      Label* possible_strings,
                                                      Label* not_both_strings) {
(...skipping 28 matching lines...)
   __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
   __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
   __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
   __ and_(r0, r2, Operand(r3));
   __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
   __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
   __ Ret();
 }


-void NumberToStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  __ ldr(r1, MemOperand(sp, 0));
-
-  // Generate code to lookup number in the number string cache.
-  __ LookupNumberStringCache(r1, r0, r2, r3, r4, &runtime);
-  __ add(sp, sp, Operand(1 * kPointerSize));
-  __ Ret();
-
-  __ bind(&runtime);
-  // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
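Note: the hand-written NumberToStringStub::Generate is deleted in the same patch that adds an interface descriptor for NumberToStringStub near the top of this file, which suggests the stub is now generated through the descriptor-based path; that reading is an inference from this diff, not a confirmed design note. For reference, a commented sketch of the fast path the deleted code implemented:

    Label runtime;
    __ ldr(r1, MemOperand(sp, 0));  // The argument (a number) is on the stack.
    // Probe the number->string cache; on a miss fall through to |runtime|.
    // On a hit the cached string is left in r0 (r2-r4 are scratch).
    __ LookupNumberStringCache(r1, r0, r2, r3, r4, &runtime);
    __ add(sp, sp, Operand(1 * kPointerSize));  // Drop the argument.
    __ Ret();
    __ bind(&runtime);
    // Cache miss: let the runtime convert, skipping a second cache probe.
    __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
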
 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                          Register input,
                                          Register scratch,
                                          CompareIC::State expected,
                                          Label* fail) {
   Label ok;
   if (expected == CompareIC::SMI) {
     __ JumpIfNotSmi(input, fail);
   } else if (expected == CompareIC::NUMBER) {
     __ JumpIfSmi(input, &ok);
(...skipping 259 matching lines...)
 }


 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }


 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
-                                          Token::Value op) {
+                                          Token::Value op,
+                                          Register scratch1,
+                                          Register scratch2) {
   Register left = r1;
   Register right = r0;
-  Register scratch1 = r7;
-  Register scratch2 = r9;

   ASSERT(right.is(r0));
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
   STATIC_ASSERT(kSmiTag == 0);

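Note: the recurring theme of this patch is that stub helpers stop hard-coding r7/r9 as scratch registers and instead take scratches from the caller, guarded up front by an AreAliased check. The motivation (freeing r7 and r9, presumably because they are reserved or repurposed on some targets) is an assumption on my part; only the mechanics below are from the diff:

    // Callers now choose the scratches explicitly; GenerateSmiStub below
    // passes r5, r6, r4, r9. AreAliased(...) returns true iff any two of
    // its valid register arguments are the same register, so this assert
    // documents and enforces the helper's register contract at the call
    // boundary.
    ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
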
   Label not_smi_result;
   switch (op) {
     case Token::ADD:
       __ add(right, left, Operand(right), SetCC);  // Add optimistically.
       __ Ret(vc);
       __ sub(right, right, Operand(left));  // Revert optimistic add.
       break;
     case Token::SUB:
(...skipping 194 matching lines...)


 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
                                       BinaryOpIC::TypeInfo left_type,
                                       BinaryOpIC::TypeInfo right_type,
                                       bool smi_operands,
                                       Label* not_numbers,
                                       Label* gc_required,
                                       Label* miss,
                                       Token::Value op,
-                                      OverwriteMode mode) {
+                                      OverwriteMode mode,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      Register scratch3,
+                                      Register scratch4) {
   Register left = r1;
   Register right = r0;
-  Register scratch1 = r6;
-  Register scratch2 = r7;
+  Register result = scratch3;
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));

   ASSERT(smi_operands || (not_numbers != NULL));
   if (smi_operands) {
     __ AssertSmi(left);
     __ AssertSmi(right);
   }
   if (left_type == BinaryOpIC::SMI) {
     __ JumpIfNotSmi(left, miss);
   }
   if (right_type == BinaryOpIC::SMI) {
     __ JumpIfNotSmi(right, miss);
   }

-  Register heap_number_map = r9;
+  Register heap_number_map = scratch4;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

   switch (op) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
     case Token::DIV:
     case Token::MOD: {
       // Allocate new heap number for result.
-      Register result = r5;
       BinaryOpStub_GenerateHeapResultAllocation(
           masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);

       // Load left and right operands into d0 and d1.
       if (smi_operands) {
         __ SmiToDouble(d1, right);
         __ SmiToDouble(d0, left);
       } else {
         // Load right operand into d1.
         if (right_type == BinaryOpIC::INT32) {
(...skipping 98 matching lines...)
     default:
       UNREACHABLE();
   }

   // Check that the *signed* result fits in a smi.
   __ TrySmiTag(r0, r2, &result_not_a_smi);
   __ Ret();

   // Allocate new heap number for result.
   __ bind(&result_not_a_smi);
-  Register result = r5;
   if (smi_operands) {
     __ AllocateHeapNumber(
         result, scratch1, scratch2, heap_number_map, gc_required);
   } else {
     BinaryOpStub_GenerateHeapResultAllocation(
         masm, result, heap_number_map, scratch1, scratch2, gc_required,
         mode);
   }

   // r2: Answer as signed int32.
-  // r5: Heap number to write answer into.
+  // result: Heap number to write answer into.

   // Nothing can go wrong now, so move the heap number to r0, which is the
   // result.
-  __ mov(r0, Operand(r5));
+  __ mov(r0, Operand(result));

   // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
   // mentioned above SHR needs to always produce a positive result.
   __ vmov(s0, r2);
   if (op == Token::SHR) {
     __ vcvt_f64_u32(d0, s0);
   } else {
     __ vcvt_f64_s32(d0, s0);
   }
   __ sub(r3, r0, Operand(kHeapObjectTag));
(...skipping 10 matching lines...)
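Note on the SHR special case above: JavaScript's unsigned shift (>>>) yields a uint32 whose top bit may be set, so reinterpreting the same 32 bits as signed would flip the sign of the resulting double. A standalone illustration (plain C++, hypothetical values, not part of the stub):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // The bit pattern a JS expression like (-1 >>> 0) leaves behind.
      uint32_t bits = 0xFFFFFFFFu;
      // vcvt_f64_s32 semantics: bits treated as signed -> wrong for SHR.
      double as_signed = static_cast<double>(static_cast<int32_t>(bits));
      // vcvt_f64_u32 semantics: bits treated as unsigned -> 4294967295.
      double as_unsigned = static_cast<double>(bits);
      std::printf("%.0f vs %.0f\n", as_signed, as_unsigned);  // -1 vs 4294967295
      return 0;
    }
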
 // Generate the smi code. If the operation on smis are successful this return is
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
 void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
     Label* use_runtime,
     Label* gc_required,
     Token::Value op,
     BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
-    OverwriteMode mode) {
+    OverwriteMode mode,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Register scratch4) {
   Label not_smis;

   Register left = r1;
   Register right = r0;
-  Register scratch1 = r7;
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));

   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
   __ JumpIfNotSmi(scratch1, &not_smis);

   // If the smi-smi operation results in a smi return is generated.
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);

   // If heap number results are possible generate the result in an allocated
   // heap number.
   if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
     BinaryOpStub_GenerateFPOperation(
         masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
-        use_runtime, gc_required, &not_smis, op, mode);
+        use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
+        scratch1, scratch4);
   }
   __ bind(&not_smis);
 }


 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label right_arg_changed, call_runtime;

   if (op_ == Token::MOD && encoded_right_arg_.has_value) {
     // It is guaranteed that the value will fit into a Smi, because if it
     // didn't, we wouldn't be here, see BinaryOp_Patch.
     __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
     __ b(ne, &right_arg_changed);
   }

   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
       result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+    BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
+                                 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
   } else {
     // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
-        mode_);
+    BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
+                                 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
   }

   // Code falls through if the result is not returned as either a smi or heap
   // number.
   __ bind(&right_arg_changed);
   GenerateTypeTransition(masm);

   __ bind(&call_runtime);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
(...skipping 33 matching lines...)
   __ bind(&call_runtime);
   GenerateTypeTransition(masm);
 }


 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);

   Register left = r1;
   Register right = r0;
-  Register scratch1 = r7;
+  Register scratch1 = r4;
   Register scratch2 = r9;
+  Register scratch3 = r5;
   LowDwVfpRegister double_scratch = d0;

   Register heap_number_result = no_reg;
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

   Label call_runtime;
   // Labels for type transition, used for wrong input or output types.
   // Both label are currently actually bound to the same position. We use two
   // different label to differentiate the cause leading to type transition.
   Label transition;

   // Smi-smi fast case.
   Label skip;
   __ orr(scratch1, left, right);
   __ JumpIfNotSmi(scratch1, &skip);
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
   // Fall through if the result is not a smi.
   __ bind(&skip);

   switch (op_) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
     case Token::DIV:
     case Token::MOD: {
       // It could be that only SMIs have been seen at either the left
(...skipping 73 matching lines...)
         // A DIV operation expecting an integer result falls through
         // to type transition.

       } else {
         if (encoded_right_arg_.has_value) {
           __ Vmov(d8, fixed_right_arg_value(), scratch1);
           __ VFPCompareAndSetFlags(d1, d8);
           __ b(ne, &transition);
         }

-        // We preserved r0 and r1 to be able to call runtime.
-        // Save the left value on the stack.
-        __ Push(r5, r4);
-
-        Label pop_and_call_runtime;
-
         // Allocate a heap number to store the result.
         heap_number_result = r5;
         BinaryOpStub_GenerateHeapResultAllocation(masm,
                                                   heap_number_result,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
-                                                  &pop_and_call_runtime,
+                                                  &call_runtime,
                                                   mode_);

-        // Load the left value from the value saved on the stack.
-        __ Pop(r1, r0);
-
         // Call the C function to handle the double operation.
         CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
         if (FLAG_debug_code) {
           __ stop("Unreachable code.");
         }

-        __ bind(&pop_and_call_runtime);
-        __ Drop(2);
         __ b(&call_runtime);
       }

       break;
     }

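Note: the stack save/restore around the heap-number allocation, and the dedicated pop_and_call_runtime unwind path, are gone. That is consistent with the allocation now touching only caller-chosen scratch registers that the new AreAliased asserts guarantee do not overlap the operands in r0/r1; the "why" is an inference from the diff. Sketch of the deleted pattern for contrast:

    // Spill operand copies, allocate, restore -- with a cleanup label so the
    // allocation-failure path could pop the saved words before bailing out.
    Label pop_and_call_runtime;
    __ Push(r5, r4);
    BinaryOpStub_GenerateHeapResultAllocation(masm, heap_number_result,
                                              heap_number_map, scratch1,
                                              scratch2, &pop_and_call_runtime,
                                              mode_);
    __ Pop(r1, r0);
    // ...
    __ bind(&pop_and_call_runtime);
    __ Drop(2);  // Discard the two saved words, then fall back to the runtime.
    __ b(&call_runtime);
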
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND:
     case Token::SAR:
(...skipping 130 matching lines...)
   __ bind(&done);

   GenerateNumberStub(masm);
 }


 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
   Label call_runtime, transition;
   BinaryOpStub_GenerateFPOperation(
       masm, left_type_, right_type_, false,
-      &transition, &call_runtime, &transition, op_, mode_);
+      &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);

   __ bind(&transition);
   GenerateTypeTransition(masm);

   __ bind(&call_runtime);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     GenerateRegisterArgsPush(masm);
     GenerateCallRuntime(masm);
   }
   __ Ret();
 }


 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime, transition;

   BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
+      r5, r6, r4, r9);

   BinaryOpStub_GenerateFPOperation(
       masm, left_type_, right_type_, false,
-      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
+      r4, r5, r9);

   __ bind(&transition);
   GenerateTypeTransition(masm);

   __ bind(&call_string_add_or_runtime);
   if (op_ == Token::ADD) {
     GenerateAddStrings(masm);
   }

   __ bind(&call_runtime);
(...skipping 81 matching lines...)
   // Untagged case: double input in d2, double result goes
   //   into d2.
   // Tagged case: tagged input on top of stack and in r0,
   //   tagged result (heap number) goes into r0.

   Label input_not_smi;
   Label loaded;
   Label calculate;
   Label invalid_cache;
   const Register scratch0 = r9;
-  const Register scratch1 = r7;
+  Register scratch1 = no_reg;  // will be r4
   const Register cache_entry = r0;
   const bool tagged = (argument_type_ == TAGGED);

   if (tagged) {
     // Argument is a number and is on stack and in r0.
     // Load argument and check if it is a smi.
     __ JumpIfNotSmi(r0, &input_not_smi);

     // Input is a smi. Convert to double and load the low and high words
     // of the double into r2, r3.
(...skipping 59 matching lines...)
 #endif

   // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
   __ add(r1, r1, Operand(r1, LSL, 1));
   __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
   __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
   __ cmp(r2, r4);
   __ cmp(r3, r5, eq);
   __ b(ne, &calculate);
+
+  scratch1 = r4;  // Start of scratch1 range.
+
   // Cache hit. Load result, cleanup and return.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(
       counters->transcendental_cache_hit(), 1, scratch0, scratch1);
   if (tagged) {
     // Pop input value from stack and load result into r0.
     __ pop();
     __ mov(r0, Operand(r6));
   } else {
     // Load result into d2.
(...skipping 122 matching lines...)
   const Register base = r1;
   const Register exponent = r2;
   const Register heapnumbermap = r5;
   const Register heapnumber = r0;
   const DwVfpRegister double_base = d1;
   const DwVfpRegister double_exponent = d2;
   const DwVfpRegister double_result = d3;
   const DwVfpRegister double_scratch = d0;
   const SwVfpRegister single_scratch = s0;
   const Register scratch = r9;
-  const Register scratch2 = r7;
+  const Register scratch2 = r4;

   Label call_runtime, done, int_exponent;
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
     // Load input parameters from stack to double registers.
     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

(...skipping 489 matching lines...)
   offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
   __ ldr(r4, MemOperand(sp, offset_to_argv));

   // Push a frame with special values setup to mark it as an entry frame.
   // r0: code entry
   // r1: function
   // r2: receiver
   // r3: argc
   // r4: argv
   Isolate* isolate = masm->isolate();
-  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ mov(r7, Operand(Smi::FromInt(marker)));
+  __ mov(r8, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5,
          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
   __ ldr(r5, MemOperand(r5));
-  __ Push(r8, r7, r6, r5);
+  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  __ Push(ip, r8, r6, r5);

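Note: the same register diet applies here: the bad frame pointer is now materialized in ip instead of r8, and the marker that used r7 moves to r8, so the entry sequence no longer touches r7. Layout of the four words pushed (sketch; slot names from the surrounding comments, offsets assume 4-byte slots):

    __ mov(ip, Operand(-1));  // Bad frame pointer: any accidental use traps.
    __ Push(ip,               // [sp+12] bad frame pointer (-1)
            r8,               // [sp+8]  frame marker (Smi)
            r6,               // [sp+4]  frame marker (Smi)
            r5);              // [sp+0]  saved C-entry frame pointer
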
   // Set up frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   __ ldr(r6, MemOperand(r5));
   __ cmp(r6, Operand::Zero());
(...skipping 25 matching lines...)
     __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                          isolate)));
   }
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);

   // Invoke: Link this frame into the handler chain. There's only one
   // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  // Must preserve r0-r4, r5-r7 are available.
+  // Must preserve r0-r4, r5-r6 are available.
   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bl(&invoke) above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.

   // Clear any pending exceptions.
   __ mov(r5, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
(...skipping 586 matching lines...)
   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
   // The mapped parameter thus need to get indices
   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
   //   MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
   // We loop from right to left.
   Label parameters_loop, parameters_test;
   __ mov(r6, r1);
   __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
   __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ sub(r9, r9, Operand(r1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ add(r3, r4, Operand(r6, LSL, 1));
   __ add(r3, r3, Operand(kParameterMapHeaderSize));

   // r6 = loop variable (tagged)
   // r1 = mapping index (tagged)
   // r3 = address of backing store (tagged)
-  // r4 = address of parameter map (tagged)
-  // r5 = temporary scratch (a.o., for address calculation)
-  // r7 = the hole value
+  // r4 = address of parameter map (tagged), which is also the address of new
+  //      object + Heap::kArgumentsObjectSize (tagged)
+  // r0 = temporary scratch (a.o., for address calculation)
+  // r5 = the hole value
   __ jmp(&parameters_test);

   __ bind(&parameters_loop);
   __ sub(r6, r6, Operand(Smi::FromInt(1)));
-  __ mov(r5, Operand(r6, LSL, 1));
-  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ str(r9, MemOperand(r4, r5));
-  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ str(r7, MemOperand(r3, r5));
+  __ mov(r0, Operand(r6, LSL, 1));
+  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ str(r9, MemOperand(r4, r0));
+  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ str(r5, MemOperand(r3, r0));
   __ add(r9, r9, Operand(Smi::FromInt(1)));
   __ bind(&parameters_test);
   __ cmp(r6, Operand(Smi::FromInt(0)));
   __ b(ne, &parameters_loop);

+  // Restore r0 = new object (tagged)
+  __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
   __ bind(&skip_parameter_map);
+  // r0 = address of new object (tagged)
   // r2 = argument count (tagged)
   // r3 = address of backing store (tagged)
   // r5 = scratch
   // Copy arguments header and remaining slots (if there are any).
   __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
   __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
   __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));

   Label arguments_loop, arguments_test;
   __ mov(r9, r1);
(...skipping 10 matching lines...)

   __ bind(&arguments_test);
   __ cmp(r9, Operand(r2));
   __ b(lt, &arguments_loop);

   // Return and remove the on-stack parameters.
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();

   // Do the runtime call to allocate the arguments object.
+  // r0 = address of new object (tagged)
   // r2 = argument count (tagged)
   __ bind(&runtime);
   __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
 }


 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // sp[0] : number of parameters
   // sp[4] : receiver displacement
(...skipping 108 matching lines...)
   const int kJSRegExpOffset = 3 * kPointerSize;

   Label runtime;
   // Allocation of registers for this function. These are in callee save
   // registers and will be preserved by the call to the native RegExp code, as
   // this code is called using the normal C calling convention. When calling
   // directly from generated code the native RegExp code will not do a GC and
   // therefore the content of these registers are safe to use after the call.
   Register subject = r4;
   Register regexp_data = r5;
-  Register last_match_info_elements = r6;
+  Register last_match_info_elements = no_reg;  // will be r6;

   // Ensure that a RegExp stack is allocated.
   Isolate* isolate = masm->isolate();
   ExternalReference address_of_regexp_stack_memory_address =
       ExternalReference::address_of_regexp_stack_memory_address(isolate);
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(isolate);
   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r0, MemOperand(r0, 0));
   __ cmp(r0, Operand::Zero());
(...skipping 112 matching lines...)
   __ JumpIfNotSmi(r1, &runtime);
   __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
   __ cmp(r3, Operand(r1));
   __ b(ls, &runtime);
   __ SmiUntag(r1);

   STATIC_ASSERT(4 == kOneByteStringTag);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ and_(r0, r0, Operand(kStringEncodingMask));
   __ mov(r3, Operand(r0, ASR, 2), SetCC);
-  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
-  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

   // (E) Carry on. String handling is done.
-  // r7: irregexp code
+  // r6: irregexp code
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
   // a smi (code flushing support).
-  __ JumpIfSmi(r7, &runtime);
+  __ JumpIfSmi(r6, &runtime);

   // r1: previous index
   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
-  // r7: code
+  // r6: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);

   // Isolates: note we add an additional parameter here (isolate pointer).
   const int kRegExpExecuteArguments = 9;
   const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

(...skipping 46 matching lines...)
   __ SmiUntag(r8);
   __ add(r3, r9, Operand(r8, LSL, r3));

   // Argument 2 (r1): Previous index.
   // Already there

   // Argument 1 (r0): Subject string.
   __ mov(r0, subject);

   // Locate the code entry and call it.
-  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
   DirectCEntryStub stub;
-  stub.GenerateCall(masm, r7);
+  stub.GenerateCall(masm, r6);

   __ LeaveExitFrame(false, no_reg, true);

+  last_match_info_elements = r6;
+
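Note: last_match_info_elements starts out as no_reg and is only bound to r6 here, once the native call has returned, because r6 holds the irregexp code pointer until then (see the hunks above). A tiny sketch of the idiom:

    // Deferred register assignment: one callee-saved register serves two
    // non-overlapping live ranges instead of consuming a second register.
    Register last_match_info_elements = no_reg;  // not usable yet
    // ... r6 carries the irregexp Code pointer into the call ...
    last_match_info_elements = r6;  // r6 is free again from here on
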
   // r0: result
   // subject: subject string (callee saved)
   // regexp_data: RegExp data (callee saved)
   // last_match_info_elements: Last match info elements (callee saved)
   // Check the result.
   Label success;
   __ cmp(r0, Operand(1));
   // We expect exactly one result since we force the called regexp to behave
   // as non-global.
   __ b(eq, &success);
(...skipping 68 matching lines...)
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
   __ mov(r2, subject);
   __ RecordWriteField(last_match_info_elements,
                       RegExpImpl::kLastSubjectOffset,
                       subject,
-                      r7,
+                      r3,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs);
   __ mov(subject, r2);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
   __ RecordWriteField(last_match_info_elements,
                       RegExpImpl::kLastInputOffset,
                       subject,
-                      r7,
+                      r3,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs);

   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
       ExternalReference::address_of_static_offsets_vector(isolate);
   __ mov(r2, Operand(address_of_static_offsets_vector));

   // r1: number of capture registers
   // r2: offsets vector
(...skipping 151 matching lines...)
4250 4250
4251 __ bind(&slowcase); 4251 __ bind(&slowcase);
4252 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); 4252 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4253 } 4253 }
4254 4254
4255 4255
4256 static void GenerateRecordCallTarget(MacroAssembler* masm) { 4256 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4257 // Cache the called function in a global property cell. Cache states 4257 // Cache the called function in a global property cell. Cache states
4258 // are uninitialized, monomorphic (indicated by a JSFunction), and 4258 // are uninitialized, monomorphic (indicated by a JSFunction), and
4259 // megamorphic. 4259 // megamorphic.
4260 // r0 : number of arguments to the construct function
4260 // r1 : the function to call 4261 // r1 : the function to call
4261 // r2 : cache cell for call target 4262 // r2 : cache cell for call target
4262 Label initialize, done, miss, megamorphic, not_array_function; 4263 Label initialize, done, miss, megamorphic, not_array_function;
4263 4264
4264 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), 4265 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
4265 masm->isolate()->heap()->undefined_value()); 4266 masm->isolate()->heap()->undefined_value());
4266 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), 4267 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
4267 masm->isolate()->heap()->the_hole_value()); 4268 masm->isolate()->heap()->the_hole_value());
4268 4269
4269 // Load the cache state into r3. 4270 // Load the cache state into r3.
4270 __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset)); 4271 __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
4271 4272
4272 // A monomorphic cache hit or an already megamorphic state: invoke the 4273 // A monomorphic cache hit or an already megamorphic state: invoke the
4273 // function without changing the state. 4274 // function without changing the state.
4274 __ cmp(r3, r1); 4275 __ cmp(r3, r1);
4275 __ b(eq, &done); 4276 __ b(eq, &done);
4276 4277
4277 // If we came here, we need to see if we are the array function. 4278 // If we came here, we need to see if we are the array function.
4278 // If we didn't have a matching function, and we didn't find the megamorph 4279 // If we didn't have a matching function, and we didn't find the megamorph
4279 // sentinel, then we have in the cell either some other function or an 4280 // sentinel, then we have in the cell either some other function or an
4280 // AllocationSite. Do a map check on the object in ecx. 4281 // AllocationSite. Do a map check on the object in ecx.
4281 Handle<Map> allocation_site_map(
4282 masm->isolate()->heap()->allocation_site_map(),
4283 masm->isolate());
4284 __ ldr(r5, FieldMemOperand(r3, 0)); 4282 __ ldr(r5, FieldMemOperand(r3, 0));
4285 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); 4283 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4286 __ b(ne, &miss); 4284 __ b(ne, &miss);
4287 4285
4288 // Make sure the function is the Array() function 4286 // Make sure the function is the Array() function
4289 __ LoadArrayFunction(r3); 4287 __ LoadArrayFunction(r3);
4290 __ cmp(r1, r3); 4288 __ cmp(r1, r3);
4291 __ b(ne, &megamorphic); 4289 __ b(ne, &megamorphic);
4292 __ jmp(&done); 4290 __ jmp(&done);
4293 4291
(...skipping 16 matching lines...)
4310 // Make sure the function is the Array() function 4308 // Make sure the function is the Array() function
4311 __ LoadArrayFunction(r3); 4309 __ LoadArrayFunction(r3);
4312 __ cmp(r1, r3); 4310 __ cmp(r1, r3);
4313 __ b(ne, &not_array_function); 4311 __ b(ne, &not_array_function);
4314 4312
4315 // The target function is the Array constructor. 4313 // The target function is the Array constructor.
4316 // Create an AllocationSite if we don't already have one, and store it in the cell. 4314 // Create an AllocationSite if we don't already have one, and store it in the cell.
4317 { 4315 {
4318 FrameScope scope(masm, StackFrame::INTERNAL); 4316 FrameScope scope(masm, StackFrame::INTERNAL);
4319 4317
4318 // Arguments register must be smi-tagged to call out.
4320 __ SmiTag(r0); 4319 __ SmiTag(r0);
4321 __ push(r0); 4320 __ push(r0);
4322 __ push(r1); 4321 __ push(r1);
4323 __ push(r2); 4322 __ push(r2);
4324 4323
4325 CreateAllocationSiteStub create_stub; 4324 CreateAllocationSiteStub create_stub;
4326 __ CallStub(&create_stub); 4325 __ CallStub(&create_stub);
4327 4326
4328 __ pop(r2); 4327 __ pop(r2);
4329 __ pop(r1); 4328 __ pop(r1);
(...skipping 316 matching lines...)
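
For reference, the transition logic described above is a small three-state machine over the cell value; per the ASSERTs, the hole marks "uninitialized", a concrete JSFunction marks "monomorphic", and undefined marks "megamorphic". A self-contained C++ sketch of those transitions (not V8 code; values are modeled as plain integers, and the Array()/AllocationSite special case handled above is omitted):

    #include <stdint.h>

    // Sketch only: sentinels modeled as reserved values.
    static const intptr_t kTheHole = -1;    // UninitializedSentinel
    static const intptr_t kUndefined = -2;  // MegamorphicSentinel

    void RecordCallTarget(intptr_t* cell, intptr_t function) {
      intptr_t state = *cell;
      if (state == function || state == kUndefined) {
        return;  // monomorphic hit, or already megamorphic: no change
      }
      *cell = (state == kTheHole)
                  ? function     // uninitialized -> monomorphic
                  : kUndefined;  // any mismatch  -> megamorphic
    }
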
4646 4645
4647 4646
4648 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, 4647 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
4649 Register dest, 4648 Register dest,
4650 Register src, 4649 Register src,
4651 Register count, 4650 Register count,
4652 Register scratch1, 4651 Register scratch1,
4653 Register scratch2, 4652 Register scratch2,
4654 Register scratch3, 4653 Register scratch3,
4655 Register scratch4, 4654 Register scratch4,
4656 Register scratch5,
4657 int flags) { 4655 int flags) {
4658 bool ascii = (flags & COPY_ASCII) != 0; 4656 bool ascii = (flags & COPY_ASCII) != 0;
4659 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; 4657 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
4660 4658
4661 if (dest_always_aligned && FLAG_debug_code) { 4659 if (dest_always_aligned && FLAG_debug_code) {
4662 // Check that destination is actually word aligned if the flag says 4660 // Check that destination is actually word aligned if the flag says
4663 // that it is. 4661 // that it is.
4664 __ tst(dest, Operand(kPointerAlignmentMask)); 4662 __ tst(dest, Operand(kPointerAlignmentMask));
4665 __ Check(eq, kDestinationOfCopyNotAligned); 4663 __ Check(eq, kDestinationOfCopyNotAligned);
4666 } 4664 }
(...skipping 54 matching lines...)
4721 __ and_(src, src, Operand(~3)); // Round down to load previous word. 4719 __ and_(src, src, Operand(~3)); // Round down to load previous word.
4722 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); 4720 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
4723 // Store the "shift" most significant bits of scratch in the least 4721 // Store the "shift" most significant bits of scratch in the least
4724 // significant bits (i.e., shift down by (32-shift)). 4722 // significant bits (i.e., shift down by (32-shift)).
4725 __ rsb(scratch2, left_shift, Operand(32)); 4723 __ rsb(scratch2, left_shift, Operand(32));
4726 Register right_shift = scratch2; 4724 Register right_shift = scratch2;
4727 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); 4725 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
4728 4726
4729 __ bind(&loop); 4727 __ bind(&loop);
4730 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); 4728 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
4731 __ sub(scratch5, limit, Operand(dest));
4732 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); 4729 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
4733 __ str(scratch1, MemOperand(dest, 4, PostIndex)); 4730 __ str(scratch1, MemOperand(dest, 4, PostIndex));
4734 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); 4731 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
4735 // Loop if four or more bytes left to copy. 4732 // Loop if four or more bytes left to copy.
4736 // Compare to eight, because we did the subtract before increasing dst. 4733 __ sub(scratch3, limit, Operand(dest));
4737 __ sub(scratch5, scratch5, Operand(8), SetCC); 4734 __ sub(scratch3, scratch3, Operand(4), SetCC);
4738 __ b(ge, &loop); 4735 __ b(ge, &loop);
4739 } 4736 }
4740 // There is now between zero and three bytes left to copy (negative that 4737 // There is now between zero and three bytes left to copy (negative that
4741 // number is in scratch5), and between one and three bytes already read into 4738 // number is in scratch3), and between one and three bytes already read into
4742 // scratch1 (eight times that number in scratch4). We may have read past 4739 // scratch1 (eight times that number in scratch4). We may have read past
4743 // the end of the string, but because objects are aligned, we have not read 4740 // the end of the string, but because objects are aligned, we have not read
4744 // past the end of the object. 4741 // past the end of the object.
4745 // Find the minimum of remaining characters to move and preloaded characters 4742 // Find the minimum of remaining characters to move and preloaded characters
4746 // and write those as bytes. 4743 // and write those as bytes.
4747 __ add(scratch5, scratch5, Operand(4), SetCC); 4744 __ add(scratch3, scratch3, Operand(4), SetCC);
4748 __ b(eq, &done); 4745 __ b(eq, &done);
4749 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); 4746 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
4750 // Move minimum of bytes read and bytes left to copy to scratch4. 4747 // Move minimum of bytes read and bytes left to copy to scratch4.
4751 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); 4748 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
4752 // Between one and three (value in scratch5) characters already read into 4749 // Between one and three (value in scratch3) characters already read into
4753 // scratch ready to write. 4750 // scratch ready to write.
4754 __ cmp(scratch5, Operand(2)); 4751 __ cmp(scratch3, Operand(2));
4755 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); 4752 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
4756 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); 4753 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
4757 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); 4754 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
4758 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); 4755 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
4759 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); 4756 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
4760 // Copy any remaining bytes. 4757 // Copy any remaining bytes.
4761 __ b(&byte_loop); 4758 __ b(&byte_loop);
4762 4759
4763 // Simple loop. 4760 // Simple loop.
4764 // Copy words from src to dst, until less than four bytes left. 4761 // Copy words from src to dst, until less than four bytes left.
(...skipping 319 matching lines...)
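
The unaligned branch above is a classic shift-and-merge copy: round the source down to a word boundary, preload one word, then assemble each destination word from the leftover high bits of the previous source word and the low bits of the next. A minimal little-endian C++ sketch of the idea (illustrative, not the stub itself; it assumes a word-aligned destination, as DEST_ALWAYS_ALIGNED guarantees, and finishes with a byte-wise tail; like the stub, the preload may read a few bytes past the data, which V8 can afford only because heap objects are word-aligned):

    #include <stddef.h>
    #include <stdint.h>

    // Assumes dest is word-aligned and src is NOT (the stub branches to a
    // simple word-copy loop when src is already aligned).
    void CopyUnalignedSrc(uint8_t* dest, const uint8_t* src, size_t count) {
      unsigned shift = ((uintptr_t)src & 3) * 8;  // 8, 16 or 24 bits
      const uint32_t* wsrc = (const uint32_t*)((uintptr_t)src & ~(uintptr_t)3);
      uint32_t have = *wsrc++ >> shift;           // preloaded leading bytes
      size_t copied = 0;
      while (count - copied >= 4) {
        uint32_t next = *wsrc++;
        *(uint32_t*)(dest + copied) = have | (next << (32 - shift));
        have = next >> shift;
        copied += 4;
      }
      for (; copied < count; copied++) {          // 0..3 trailing bytes
        dest[copied] = src[copied];
      }
    }
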
5084 // Allocate new sliced string. At this point we do not reload the instance 5081 // Allocate new sliced string. At this point we do not reload the instance
5085 // type including the string encoding because we simply rely on the info 5082 // type including the string encoding because we simply rely on the info
5086 // provided by the original string. It does not matter if the original 5083 // provided by the original string. It does not matter if the original
5087 // string's encoding is wrong because we always have to recheck encoding of 5084 // string's encoding is wrong because we always have to recheck encoding of
5088 // the newly created string's parent anyway due to externalized strings. 5085 // the newly created string's parent anyway due to externalized strings.
5089 Label two_byte_slice, set_slice_header; 5086 Label two_byte_slice, set_slice_header;
5090 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); 5087 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
5091 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); 5088 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5092 __ tst(r1, Operand(kStringEncodingMask)); 5089 __ tst(r1, Operand(kStringEncodingMask));
5093 __ b(eq, &two_byte_slice); 5090 __ b(eq, &two_byte_slice);
5094 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); 5091 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
5095 __ jmp(&set_slice_header); 5092 __ jmp(&set_slice_header);
5096 __ bind(&two_byte_slice); 5093 __ bind(&two_byte_slice);
5097 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); 5094 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
5098 __ bind(&set_slice_header); 5095 __ bind(&set_slice_header);
5099 __ mov(r3, Operand(r3, LSL, 1)); 5096 __ mov(r3, Operand(r3, LSL, 1));
5100 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); 5097 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
5101 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); 5098 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
5102 __ jmp(&return_r0); 5099 __ jmp(&return_r0);
5103 5100
5104 __ bind(&copy_routine); 5101 __ bind(&copy_routine);
5105 } 5102 }
5106 5103
5107 // r5: underlying subject string 5104 // r5: underlying subject string
(...skipping 20 matching lines...)
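
For context on the slice path above: a sliced string is a zero-copy substring. Only a header recording a (parent, offset) pair over the underlying string is allocated, which is why AllocateAsciiSlicedString/AllocateTwoByteSlicedString allocate no character storage. An illustrative layout (not V8's declaration; the field names follow the offsets used above):

    #include <stdint.h>

    struct SlicedStringLayout {
      // ... common string header: map pointer, hash field, length ...
      void*    parent;  // SlicedString::kParentOffset: the underlying string
      intptr_t offset;  // SlicedString::kOffsetOffset: start index, stored
                        // smi-tagged, hence the "LSL, 1" above
    };
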
5128 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); 5125 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
5129 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5126 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5130 5127
5131 __ bind(&allocate_result); 5128 __ bind(&allocate_result);
5132 // Sequential ASCII string. Allocate the result. 5129 // Sequential ASCII string. Allocate the result.
5133 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); 5130 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
5134 __ tst(r1, Operand(kStringEncodingMask)); 5131 __ tst(r1, Operand(kStringEncodingMask));
5135 __ b(eq, &two_byte_sequential); 5132 __ b(eq, &two_byte_sequential);
5136 5133
5137 // Allocate and copy the resulting ASCII string. 5134 // Allocate and copy the resulting ASCII string.
5138 __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); 5135 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
5139 5136
5140 // Locate first character of substring to copy. 5137 // Locate first character of substring to copy.
5141 __ add(r5, r5, r3); 5138 __ add(r5, r5, r3);
5142 // Locate first character of result. 5139 // Locate first character of result.
5143 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5140 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5144 5141
5145 // r0: result string 5142 // r0: result string
5146 // r1: first character of result string 5143 // r1: first character of result string
5147 // r2: result string length 5144 // r2: result string length
5148 // r5: first character of substring to copy 5145 // r5: first character of substring to copy
5149 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5146 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5150 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, 5147 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
5151 COPY_ASCII | DEST_ALWAYS_ALIGNED); 5148 COPY_ASCII | DEST_ALWAYS_ALIGNED);
5152 __ jmp(&return_r0); 5149 __ jmp(&return_r0);
5153 5150
5154 // Allocate and copy the resulting two-byte string. 5151 // Allocate and copy the resulting two-byte string.
5155 __ bind(&two_byte_sequential); 5152 __ bind(&two_byte_sequential);
5156 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); 5153 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
5157 5154
5158 // Locate first character of substring to copy. 5155 // Locate first character of substring to copy.
5159 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 5156 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5160 __ add(r5, r5, Operand(r3, LSL, 1)); 5157 __ add(r5, r5, Operand(r3, LSL, 1));
5161 // Locate first character of result. 5158 // Locate first character of result.
5162 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5159 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5163 5160
5164 // r0: result string. 5161 // r0: result string.
5165 // r1: first character of result. 5162 // r1: first character of result.
5166 // r2: result length. 5163 // r2: result length.
5167 // r5: first character of substring to copy. 5164 // r5: first character of substring to copy.
5168 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5165 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5169 StringHelper::GenerateCopyCharactersLong( 5166 StringHelper::GenerateCopyCharactersLong(
5170 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); 5167 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
5171 5168
5172 __ bind(&return_r0); 5169 __ bind(&return_r0);
5173 Counters* counters = masm->isolate()->counters(); 5170 Counters* counters = masm->isolate()->counters();
5174 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); 5171 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
5175 __ Drop(3); 5172 __ Drop(3);
5176 __ Ret(); 5173 __ Ret();
5177 5174
5178 // Just jump to runtime to create the sub string. 5175 // Just jump to runtime to create the sub string.
5179 __ bind(&runtime); 5176 __ bind(&runtime);
5180 __ TailCallRuntime(Runtime::kSubString, 3, 1); 5177 __ TailCallRuntime(Runtime::kSubString, 3, 1);
(...skipping 245 matching lines...)
5426 __ cmp(r6, Operand(2)); 5423 __ cmp(r6, Operand(2));
5427 __ b(ne, &longer_than_two); 5424 __ b(ne, &longer_than_two);
5428 5425
5429 // Check that both strings are non-external ASCII strings. 5426 // Check that both strings are non-external ASCII strings.
5430 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5427 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5431 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5428 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5432 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5429 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5433 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5430 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5434 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5431 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5435 } 5432 }
5436 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, 5433 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
5437 &call_runtime); 5434 &call_runtime);
5438 5435
5439 // Get the two characters forming the sub string. 5436 // Get the two characters forming the sub string.
5440 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); 5437 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
5441 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); 5438 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
5442 5439
5443 // Try to look up the two-character string in the string table. If it is 5440 // Try to look up the two-character string in the string table. If it is
5444 // not found, just allocate a new one. 5441 // not found, just allocate a new one.
5445 Label make_two_character_string; 5442 Label make_two_character_string;
5446 StringHelper::GenerateTwoCharacterStringTableProbe( 5443 StringHelper::GenerateTwoCharacterStringTableProbe(
5447 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); 5444 masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
5448 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5445 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5449 __ add(sp, sp, Operand(2 * kPointerSize)); 5446 __ add(sp, sp, Operand(2 * kPointerSize));
5450 __ Ret(); 5447 __ Ret();
5451 5448
5452 __ bind(&make_two_character_string); 5449 __ bind(&make_two_character_string);
5453 // The resulting string has length 2, and the first characters of the two 5450 // The resulting string has length 2, and the first characters of the two
5454 // strings are combined into a single halfword in the r2 register. 5451 // strings are combined into a single halfword in the r2 register.
5455 // So we can fill the resulting string without two loops, using a single 5452 // So we can fill the resulting string without two loops, using a single
5456 // halfword store instruction (which assumes that the processor is 5453 // halfword store instruction (which assumes that the processor is
5457 // in little-endian mode). 5454 // in little-endian mode).
(...skipping 24 matching lines...)
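
Restated in C++ terms, the single-halfword fill works because the first character is packed into the low byte: one 16-bit store then writes the two characters in memory order on a little-endian core. A sketch (names are illustrative):

    #include <stdint.h>
    #include <string.h>

    // chars points at the two-character string payload; c1 and c2 are the
    // first and second ASCII characters.
    static void FillTwoCharString(uint8_t* chars, uint8_t c1, uint8_t c2) {
      uint16_t packed = (uint16_t)(c1 | ((uint16_t)c2 << 8));  // c1 in low byte
      memcpy(chars, &packed, sizeof(packed));  // a single halfword store
    }
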
5482 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5479 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5483 } 5480 }
5484 Label non_ascii, allocated, ascii_data; 5481 Label non_ascii, allocated, ascii_data;
5485 STATIC_ASSERT(kTwoByteStringTag == 0); 5482 STATIC_ASSERT(kTwoByteStringTag == 0);
5486 __ tst(r4, Operand(kStringEncodingMask)); 5483 __ tst(r4, Operand(kStringEncodingMask));
5487 __ tst(r5, Operand(kStringEncodingMask), ne); 5484 __ tst(r5, Operand(kStringEncodingMask), ne);
5488 __ b(eq, &non_ascii); 5485 __ b(eq, &non_ascii);
5489 5486
5490 // Allocate an ASCII cons string. 5487 // Allocate an ASCII cons string.
5491 __ bind(&ascii_data); 5488 __ bind(&ascii_data);
5492 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); 5489 __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
5493 __ bind(&allocated); 5490 __ bind(&allocated);
5494 // Fill the fields of the cons string. 5491 // Fill the fields of the cons string.
5495 Label skip_write_barrier, after_writing; 5492 Label skip_write_barrier, after_writing;
5496 ExternalReference high_promotion_mode = ExternalReference:: 5493 ExternalReference high_promotion_mode = ExternalReference::
5497 new_space_high_promotion_mode_active_address(masm->isolate()); 5494 new_space_high_promotion_mode_active_address(masm->isolate());
5498 __ mov(r4, Operand(high_promotion_mode)); 5495 __ mov(r4, Operand(high_promotion_mode));
5499 __ ldr(r4, MemOperand(r4, 0)); 5496 __ ldr(r4, MemOperand(r4, 0));
5500 __ cmp(r4, Operand::Zero()); 5497 __ cmp(r4, Operand::Zero());
5501 __ b(eq, &skip_write_barrier); 5498 __ b(eq, &skip_write_barrier);
5502 5499
5503 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5500 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5504 __ RecordWriteField(r7, 5501 __ RecordWriteField(r3,
5505 ConsString::kFirstOffset, 5502 ConsString::kFirstOffset,
5506 r0, 5503 r0,
5507 r4, 5504 r4,
5508 kLRHasNotBeenSaved, 5505 kLRHasNotBeenSaved,
5509 kDontSaveFPRegs); 5506 kDontSaveFPRegs);
5510 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5507 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5511 __ RecordWriteField(r7, 5508 __ RecordWriteField(r3,
5512 ConsString::kSecondOffset, 5509 ConsString::kSecondOffset,
5513 r1, 5510 r1,
5514 r4, 5511 r4,
5515 kLRHasNotBeenSaved, 5512 kLRHasNotBeenSaved,
5516 kDontSaveFPRegs); 5513 kDontSaveFPRegs);
5517 __ jmp(&after_writing); 5514 __ jmp(&after_writing);
5518 5515
5519 __ bind(&skip_write_barrier); 5516 __ bind(&skip_write_barrier);
5520 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5517 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5521 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5518 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5522 5519
5523 __ bind(&after_writing); 5520 __ bind(&after_writing);
5524 5521
5525 __ mov(r0, Operand(r7)); 5522 __ mov(r0, Operand(r3));
5526 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5523 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5527 __ add(sp, sp, Operand(2 * kPointerSize)); 5524 __ add(sp, sp, Operand(2 * kPointerSize));
5528 __ Ret(); 5525 __ Ret();
5529 5526
5530 __ bind(&non_ascii); 5527 __ bind(&non_ascii);
5531 // At least one of the strings is two-byte. Check whether it happens 5528 // At least one of the strings is two-byte. Check whether it happens
5532 // to contain only one-byte characters. 5529 // to contain only one-byte characters.
5533 // r4: first instance type. 5530 // r4: first instance type.
5534 // r5: second instance type. 5531 // r5: second instance type.
5535 __ tst(r4, Operand(kOneByteDataHintMask)); 5532 __ tst(r4, Operand(kOneByteDataHintMask));
5536 __ tst(r5, Operand(kOneByteDataHintMask), ne); 5533 __ tst(r5, Operand(kOneByteDataHintMask), ne);
5537 __ b(ne, &ascii_data); 5534 __ b(ne, &ascii_data);
5538 __ eor(r4, r4, Operand(r5)); 5535 __ eor(r4, r4, Operand(r5));
5539 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); 5536 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
5540 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5537 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5541 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5538 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5542 __ b(eq, &ascii_data); 5539 __ b(eq, &ascii_data);
5543 5540
5544 // Allocate a two byte cons string. 5541 // Allocate a two byte cons string.
5545 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); 5542 __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
5546 __ jmp(&allocated); 5543 __ jmp(&allocated);
5547 5544
5548 // We cannot encounter sliced strings or cons strings here since: 5545 // We cannot encounter sliced strings or cons strings here since:
5549 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); 5546 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5550 // Handle creating a flat result from either external or sequential strings. 5547 // Handle creating a flat result from either external or sequential strings.
5551 // Locate the first character of each string. 5548 // Locate the first character of each string.
5552 // r0: first string 5549 // r0: first string
5553 // r1: second string 5550 // r1: second string
5554 // r2: length of first string 5551 // r2: length of first string
5555 // r3: length of second string 5552 // r3: length of second string
5556 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5553 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5557 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5554 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5558 // r6: sum of lengths. 5555 // r6: sum of lengths.
5559 Label first_prepared, second_prepared; 5556 Label first_prepared, second_prepared;
5560 __ bind(&string_add_flat_result); 5557 __ bind(&string_add_flat_result);
5561 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5558 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5562 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5559 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5563 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5560 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5564 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5561 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5565 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5562 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5566 } 5563 }
5567 5564
5565 // Check whether both strings have the same encoding 5562 // Check whether both strings have the same encoding
5569 __ eor(r7, r4, Operand(r5)); 5566 __ eor(ip, r4, Operand(r5));
5570 __ tst(r7, Operand(kStringEncodingMask)); 5567 ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
5568 __ tst(ip, Operand(kStringEncodingMask));
5571 __ b(ne, &call_runtime); 5569 __ b(ne, &call_runtime);
5572 5570
5573 STATIC_ASSERT(kSeqStringTag == 0); 5571 STATIC_ASSERT(kSeqStringTag == 0);
5574 __ tst(r4, Operand(kStringRepresentationMask)); 5572 __ tst(r4, Operand(kStringRepresentationMask));
5575 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5573 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5576 __ add(r7, 5574 __ add(r6,
5577 r0, 5575 r0,
5578 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5576 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5579 LeaveCC, 5577 LeaveCC,
5580 eq); 5578 eq);
5581 __ b(eq, &first_prepared); 5579 __ b(eq, &first_prepared);
5582 // External string: rule out short external string and load string resource. 5580 // External string: rule out short external string and load string resource.
5583 STATIC_ASSERT(kShortExternalStringTag != 0); 5581 STATIC_ASSERT(kShortExternalStringTag != 0);
5584 __ tst(r4, Operand(kShortExternalStringMask)); 5582 __ tst(r4, Operand(kShortExternalStringMask));
5585 __ b(ne, &call_runtime); 5583 __ b(ne, &call_runtime);
5586 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); 5584 __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
5587 __ bind(&first_prepared); 5585 __ bind(&first_prepared);
5588 5586
5589 STATIC_ASSERT(kSeqStringTag == 0); 5587 STATIC_ASSERT(kSeqStringTag == 0);
5590 __ tst(r5, Operand(kStringRepresentationMask)); 5588 __ tst(r5, Operand(kStringRepresentationMask));
5591 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5589 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5592 __ add(r1, 5590 __ add(r1,
5593 r1, 5591 r1,
5594 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5592 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5595 LeaveCC, 5593 LeaveCC,
5596 eq); 5594 eq);
5597 __ b(eq, &second_prepared); 5595 __ b(eq, &second_prepared);
5598 // External string: rule out short external string and load string resource. 5596 // External string: rule out short external string and load string resource.
5599 STATIC_ASSERT(kShortExternalStringTag != 0); 5597 STATIC_ASSERT(kShortExternalStringTag != 0);
5600 __ tst(r5, Operand(kShortExternalStringMask)); 5598 __ tst(r5, Operand(kShortExternalStringMask));
5601 __ b(ne, &call_runtime); 5599 __ b(ne, &call_runtime);
5602 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); 5600 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
5603 __ bind(&second_prepared); 5601 __ bind(&second_prepared);
5604 5602
5605 Label non_ascii_string_add_flat_result; 5603 Label non_ascii_string_add_flat_result;
5606 // r7: first character of first string 5604 // r6: first character of first string
5607 // r1: first character of second string 5605 // r1: first character of second string
5608 // r2: length of first string. 5606 // r2: length of first string.
5609 // r3: length of second string. 5607 // r3: length of second string.
5610 // r6: sum of lengths.
5611 // Both strings have the same encoding. 5608 // Both strings have the same encoding.
5612 STATIC_ASSERT(kTwoByteStringTag == 0); 5609 STATIC_ASSERT(kTwoByteStringTag == 0);
5613 __ tst(r5, Operand(kStringEncodingMask)); 5610 __ tst(r5, Operand(kStringEncodingMask));
5614 __ b(eq, &non_ascii_string_add_flat_result); 5611 __ b(eq, &non_ascii_string_add_flat_result);
5615 5612
5616 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); 5613 __ add(r2, r2, Operand(r3));
5617 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5614 __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
5615 __ sub(r2, r2, Operand(r3));
5616 __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5618 // r0: result string. 5617 // r0: result string.
5619 // r7: first character of first string. 5618 // r6: first character of first string.
5620 // r1: first character of second string. 5619 // r1: first character of second string.
5621 // r2: length of first string. 5620 // r2: length of first string.
5622 // r3: length of second string. 5621 // r3: length of second string.
5623 // r6: first character of result. 5622 // r5: first character of result.
5624 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); 5623 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
5625 // r6: next character of result. 5624 // r5: next character of result.
5626 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); 5625 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
5627 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5626 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5628 __ add(sp, sp, Operand(2 * kPointerSize)); 5627 __ add(sp, sp, Operand(2 * kPointerSize));
5629 __ Ret(); 5628 __ Ret();
5630 5629
5631 __ bind(&non_ascii_string_add_flat_result); 5630 __ bind(&non_ascii_string_add_flat_result);
5632 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); 5631 __ add(r2, r2, Operand(r3));
5633 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5632 __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
5633 __ sub(r2, r2, Operand(r3));
5634 __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5634 // r0: result string. 5635 // r0: result string.
5635 // r7: first character of first string. 5636 // r6: first character of first string.
5636 // r1: first character of second string. 5637 // r1: first character of second string.
5637 // r2: length of first string. 5638 // r2: length of first string.
5638 // r3: length of second string. 5639 // r3: length of second string.
5639 // r6: first character of result. 5640 // r5: first character of result.
5640 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); 5641 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
5641 // r6: next character of result. 5642 // r5: next character of result.
5642 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); 5643 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
5643 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5644 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5644 __ add(sp, sp, Operand(2 * kPointerSize)); 5645 __ add(sp, sp, Operand(2 * kPointerSize));
5645 __ Ret(); 5646 __ Ret();
5646 5647
5647 // Just jump to runtime to add the two strings. 5648 // Just jump to runtime to add the two strings.
5648 __ bind(&call_runtime); 5649 __ bind(&call_runtime);
5649 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { 5650 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
5650 GenerateRegisterArgsPop(masm); 5651 GenerateRegisterArgsPop(masm);
5651 // Build a frame 5652 // Build a frame
5652 { 5653 {
(...skipping 649 matching lines...)
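
One detail worth restating from the cons-string path further up: the kOneByteDataHintMask test before &ascii_data takes the one-byte path either when both instance types carry the one-byte-data hint, or when the two types differ in exactly the encoding bit and the hint bit (a one-byte string paired with a two-byte string hinted to hold one-byte data). A C++ restatement of that branch logic (a sketch; the tag values below are placeholders, not V8's actual instance-type constants):

    #include <stdint.h>

    // Placeholder bit positions, for illustration only.
    static const uint32_t kOneByteStringTag    = 1u << 2;
    static const uint32_t kOneByteDataHintTag  = 1u << 3;
    static const uint32_t kOneByteDataHintMask = kOneByteDataHintTag;

    // Mirrors the tst/eor/and/cmp sequence before &ascii_data.
    bool CanMakeOneByteConsString(uint32_t first_type, uint32_t second_type) {
      if ((first_type & kOneByteDataHintMask) != 0 &&
          (second_type & kOneByteDataHintMask) != 0) {
        return true;  // both strings hinted to hold one-byte data
      }
      uint32_t diff = (first_type ^ second_type) &
                      (kOneByteStringTag | kOneByteDataHintTag);
      return diff == (kOneByteStringTag | kOneByteDataHintTag);
    }
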
6302 struct AheadOfTimeWriteBarrierStubList { 6303 struct AheadOfTimeWriteBarrierStubList {
6303 Register object, value, address; 6304 Register object, value, address;
6304 RememberedSetAction action; 6305 RememberedSetAction action;
6305 }; 6306 };
6306 6307
6307 6308
6308 #define REG(Name) { kRegister_ ## Name ## _Code } 6309 #define REG(Name) { kRegister_ ## Name ## _Code }
6309 6310
6310 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { 6311 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6311 // Used in RegExpExecStub. 6312 // Used in RegExpExecStub.
6312 { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, 6313 { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
6313 // Used in CompileArrayPushCall. 6314 // Used in CompileArrayPushCall.
6314 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. 6315 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
6315 // Also used in KeyedStoreIC::GenerateGeneric. 6316 // Also used in KeyedStoreIC::GenerateGeneric.
6316 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, 6317 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
6317 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. 6318 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
6318 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, 6319 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
6319 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, 6320 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
6320 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 6321 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
6321 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, 6322 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
6322 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, 6323 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
6323 // KeyedStoreStubCompiler::GenerateStoreFastElement. 6324 // KeyedStoreStubCompiler::GenerateStoreFastElement.
6324 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, 6325 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
6325 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, 6326 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
6326 // ElementsTransitionGenerator::GenerateMapChangeElementTransition 6327 // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6327 // and ElementsTransitionGenerator::GenerateSmiToDouble 6328 // and ElementsTransitionGenerator::GenerateSmiToDouble
6328 // and ElementsTransitionGenerator::GenerateDoubleToObject 6329 // and ElementsTransitionGenerator::GenerateDoubleToObject
6329 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, 6330 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
6330 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, 6331 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
6331 // ElementsTransitionGenerator::GenerateDoubleToObject 6332 // ElementsTransitionGenerator::GenerateDoubleToObject
6332 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, 6333 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
6333 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, 6334 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
6334 // StoreArrayLiteralElementStub::Generate 6335 // StoreArrayLiteralElementStub::Generate
6335 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, 6336 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
6336 // FastNewClosureStub::Generate 6337 // FastNewClosureStub::Generate
6337 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, 6338 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
6338 // StringAddStub::Generate 6339 // StringAddStub::Generate
6339 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, 6340 { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
6340 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, 6341 { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
6341 // Null termination. 6342 // Null termination.
6342 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} 6343 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6343 }; 6344 };
6344 6345
6345 #undef REG 6346 #undef REG
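
The REG macro above builds each Register aggregate by token-pasting the name into its register-code constant, so every table row is a plain braced initializer. For example, the first RegExpExecStub entry expands as follows:

    // { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET }
    // expands to:
    // { { kRegister_r6_Code },
    //   { kRegister_r4_Code },
    //   { kRegister_r3_Code },
    //   EMIT_REMEMBERED_SET }
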
6346 6347
6347 6348
6348 bool RecordWriteStub::IsPregenerated(Isolate* isolate) { 6349 bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
6349 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 6350 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6350 !entry->object.is(no_reg); 6351 !entry->object.is(no_reg);
(...skipping 711 matching lines...)
7062 __ bind(&fast_elements_case); 7063 __ bind(&fast_elements_case);
7063 GenerateCase(masm, FAST_ELEMENTS); 7064 GenerateCase(masm, FAST_ELEMENTS);
7064 } 7065 }
7065 7066
7066 7067
7067 #undef __ 7068 #undef __
7068 7069
7069 } } // namespace v8::internal 7070 } } // namespace v8::internal
7070 7071
7071 #endif // V8_TARGET_ARCH_ARM 7072 #endif // V8_TARGET_ARCH_ARM