OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 315 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
326 Counters* counters = masm->isolate()->counters(); | 326 Counters* counters = masm->isolate()->counters(); |
327 | 327 |
328 Label gc; | 328 Label gc; |
329 | 329 |
330 // Pop the function info from the stack. | 330 // Pop the function info from the stack. |
331 __ pop(r3); | 331 __ pop(r3); |
332 | 332 |
333 // Attempt to allocate new JSFunction in new space. | 333 // Attempt to allocate new JSFunction in new space. |
334 __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); | 334 __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); |
335 | 335 |
336 __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7); | 336 __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r4); |
337 | 337 |
338 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); | 338 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); |
339 | 339 |
340 // Compute the function map in the current native context and set that | 340 // Compute the function map in the current native context and set that |
341 // as the map of the allocated object. | 341 // as the map of the allocated object. |
342 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 342 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
343 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); | 343 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); |
344 __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index))); | 344 __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index))); |
345 __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset)); | 345 __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset)); |
346 | 346 |
(...skipping 24 matching lines...) Expand all Loading... | |
371 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); | 371 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); |
372 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); | 372 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); |
373 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 373 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
374 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | 374 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); |
375 | 375 |
376 // Return result. The argument function info has been popped already. | 376 // Return result. The argument function info has been popped already. |
377 __ Ret(); | 377 __ Ret(); |
378 | 378 |
379 __ bind(&check_optimized); | 379 __ bind(&check_optimized); |
380 | 380 |
381 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7); | 381 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r4); |
382 | 382 |
383 // r2 holds native context, r1 points to fixed array of 3-element entries | 383 // r2 holds native context, r1 points to fixed array of 3-element entries |
384 // (native context, optimized code, literals). | 384 // (native context, optimized code, literals). |
385 // The optimized code map must never be empty, so check the first elements. | 385 // The optimized code map must never be empty, so check the first elements. |
386 Label install_optimized; | 386 Label install_optimized; |
387 // Speculatively move code object into r4. | 387 // Speculatively move code object into r4. |
388 __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); | 388 __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); |
389 __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); | 389 __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); |
390 __ cmp(r2, r5); | 390 __ cmp(r2, r5); |
391 __ b(eq, &install_optimized); | 391 __ b(eq, &install_optimized); |
(...skipping 12 matching lines...) Expand all Loading... | |
404 __ cmp(r2, r5); | 404 __ cmp(r2, r5); |
405 __ b(ne, &loop); | 405 __ b(ne, &loop); |
406 // Hit: fetch the optimized code. | 406 // Hit: fetch the optimized code. |
407 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 407 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
408 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); | 408 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); |
409 __ add(r5, r5, Operand(kPointerSize)); | 409 __ add(r5, r5, Operand(kPointerSize)); |
410 __ ldr(r4, MemOperand(r5)); | 410 __ ldr(r4, MemOperand(r5)); |
411 | 411 |
412 __ bind(&install_optimized); | 412 __ bind(&install_optimized); |
413 __ IncrementCounter(counters->fast_new_closure_install_optimized(), | 413 __ IncrementCounter(counters->fast_new_closure_install_optimized(), |
414 1, r6, r7); | 414 1, r6, r5); |
415 | 415 |
416 // TODO(fschneider): Idea: store proper code pointers in the map and either | 416 // TODO(fschneider): Idea: store proper code pointers in the map and either |
417 // unmangle them on marking or do nothing as the whole map is discarded on | 417 // unmangle them on marking or do nothing as the whole map is discarded on |
418 // major GC anyway. | 418 // major GC anyway. |
419 __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); | 419 __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); |
420 __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | 420 __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); |
421 | 421 |
422 // Now link a function into a list of optimized functions. | 422 // Now link a function into a list of optimized functions. |
423 __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); | 423 __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); |
424 | 424 |
(...skipping 417 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
842 } else { | 842 } else { |
843 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 843 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
844 // the runtime. | 844 // the runtime. |
845 __ b(ne, slow); | 845 __ b(ne, slow); |
846 } | 846 } |
847 | 847 |
848 // Lhs is a smi, rhs is a number. | 848 // Lhs is a smi, rhs is a number. |
849 // Convert lhs to a double in d7. | 849 // Convert lhs to a double in d7. |
850 __ SmiToDouble(d7, lhs); | 850 __ SmiToDouble(d7, lhs); |
851 // Load the double from rhs, tagged HeapNumber r0, to d6. | 851 // Load the double from rhs, tagged HeapNumber r0, to d6. |
852 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 852 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); |
853 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
854 | 853 |
855 // We now have both loaded as doubles but we can skip the lhs nan check | 854 // We now have both loaded as doubles but we can skip the lhs nan check |
856 // since it's a smi. | 855 // since it's a smi. |
857 __ jmp(lhs_not_nan); | 856 __ jmp(lhs_not_nan); |
858 | 857 |
859 __ bind(&rhs_is_smi); | 858 __ bind(&rhs_is_smi); |
860 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 859 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
861 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 860 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); |
862 if (strict) { | 861 if (strict) { |
863 // If lhs is not a number and rhs is a smi then strict equality cannot | 862 // If lhs is not a number and rhs is a smi then strict equality cannot |
864 // succeed. Return non-equal. | 863 // succeed. Return non-equal. |
865 // If lhs is r0 then there is already a non zero value in it. | 864 // If lhs is r0 then there is already a non zero value in it. |
866 if (!lhs.is(r0)) { | 865 if (!lhs.is(r0)) { |
867 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 866 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
868 } | 867 } |
869 __ Ret(ne); | 868 __ Ret(ne); |
870 } else { | 869 } else { |
871 // Smi compared non-strictly with a non-smi non-heap-number. Call | 870 // Smi compared non-strictly with a non-smi non-heap-number. Call |
872 // the runtime. | 871 // the runtime. |
873 __ b(ne, slow); | 872 __ b(ne, slow); |
874 } | 873 } |
875 | 874 |
876 // Rhs is a smi, lhs is a heap number. | 875 // Rhs is a smi, lhs is a heap number. |
877 // Load the double from lhs, tagged HeapNumber r1, to d7. | 876 // Load the double from lhs, tagged HeapNumber r1, to d7. |
878 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 877 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); |
879 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
880 // Convert rhs to a double in d6 . | 878 // Convert rhs to a double in d6 . |
881 __ SmiToDouble(d6, rhs); | 879 __ SmiToDouble(d6, rhs); |
882 // Fall through to both_loaded_as_doubles. | 880 // Fall through to both_loaded_as_doubles. |
883 } | 881 } |
884 | 882 |
885 | 883 |
886 // See comment at call site. | 884 // See comment at call site. |
887 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 885 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
888 Register lhs, | 886 Register lhs, |
889 Register rhs) { | 887 Register rhs) { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
937 (lhs.is(r1) && rhs.is(r0))); | 935 (lhs.is(r1) && rhs.is(r0))); |
938 | 936 |
939 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 937 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
940 __ b(ne, not_heap_numbers); | 938 __ b(ne, not_heap_numbers); |
941 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 939 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
942 __ cmp(r2, r3); | 940 __ cmp(r2, r3); |
943 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 941 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
944 | 942 |
945 // Both are heap numbers. Load them up then jump to the code we have | 943 // Both are heap numbers. Load them up then jump to the code we have |
946 // for that. | 944 // for that. |
947 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 945 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); |
948 __ vldr(d6, r7, HeapNumber::kValueOffset); | 946 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); |
949 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
950 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
951 __ jmp(both_loaded_as_doubles); | 947 __ jmp(both_loaded_as_doubles); |
952 } | 948 } |
953 | 949 |
954 | 950 |
955 // Fast negative check for internalized-to-internalized equality. | 951 // Fast negative check for internalized-to-internalized equality. |
956 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 952 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
957 Register lhs, | 953 Register lhs, |
958 Register rhs, | 954 Register rhs, |
959 Label* possible_strings, | 955 Label* possible_strings, |
960 Label* not_both_strings) { | 956 Label* not_both_strings) { |
(...skipping 409 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1370 } | 1366 } |
1371 | 1367 |
1372 | 1368 |
1373 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 1369 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
1374 MacroAssembler* masm) { | 1370 MacroAssembler* masm) { |
1375 UNIMPLEMENTED(); | 1371 UNIMPLEMENTED(); |
1376 } | 1372 } |
1377 | 1373 |
1378 | 1374 |
1379 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, | 1375 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
1380 Token::Value op) { | 1376 Token::Value op, |
1377 Register scratch1, | |
1378 Register scratch2) { | |
1381 Register left = r1; | 1379 Register left = r1; |
1382 Register right = r0; | 1380 Register right = r0; |
1383 Register scratch1 = r7; | |
1384 Register scratch2 = r9; | |
1385 | 1381 |
Rodolph Perfetta
2013/07/30 15:19:08
assert that left, right, ip, scratch1 and scratch2
rmcilroy
2013/07/30 17:12:37
Done.
| |
1386 ASSERT(right.is(r0)); | 1382 ASSERT(right.is(r0)); |
1387 STATIC_ASSERT(kSmiTag == 0); | 1383 STATIC_ASSERT(kSmiTag == 0); |
1388 | 1384 |
1389 Label not_smi_result; | 1385 Label not_smi_result; |
1390 switch (op) { | 1386 switch (op) { |
1391 case Token::ADD: | 1387 case Token::ADD: |
1392 __ add(right, left, Operand(right), SetCC); // Add optimistically. | 1388 __ add(right, left, Operand(right), SetCC); // Add optimistically. |
1393 __ Ret(vc); | 1389 __ Ret(vc); |
1394 __ sub(right, right, Operand(left)); // Revert optimistic add. | 1390 __ sub(right, right, Operand(left)); // Revert optimistic add. |
1395 break; | 1391 break; |
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1591 | 1587 |
1592 | 1588 |
1593 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, | 1589 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
1594 BinaryOpIC::TypeInfo left_type, | 1590 BinaryOpIC::TypeInfo left_type, |
1595 BinaryOpIC::TypeInfo right_type, | 1591 BinaryOpIC::TypeInfo right_type, |
1596 bool smi_operands, | 1592 bool smi_operands, |
1597 Label* not_numbers, | 1593 Label* not_numbers, |
1598 Label* gc_required, | 1594 Label* gc_required, |
1599 Label* miss, | 1595 Label* miss, |
1600 Token::Value op, | 1596 Token::Value op, |
1601 OverwriteMode mode) { | 1597 OverwriteMode mode, |
1598 Register scratch1, | |
1599 Register scratch2, | |
1600 Register scratch3, | |
1601 Register scratch4) { | |
1602 Register left = r1; | 1602 Register left = r1; |
1603 Register right = r0; | 1603 Register right = r0; |
1604 Register scratch1 = r6; | 1604 Register result = scratch3; |
1605 Register scratch2 = r7; | |
1606 Register scratch3 = r4; | |
1607 | 1605 |
Rodolph Perfetta
2013/07/30 15:19:08
assert the various scratches are not aliased.
rmcilroy
2013/07/30 17:12:37
Done.
| |
1608 ASSERT(smi_operands || (not_numbers != NULL)); | 1606 ASSERT(smi_operands || (not_numbers != NULL)); |
1609 if (smi_operands) { | 1607 if (smi_operands) { |
1610 __ AssertSmi(left); | 1608 __ AssertSmi(left); |
1611 __ AssertSmi(right); | 1609 __ AssertSmi(right); |
1612 } | 1610 } |
1613 if (left_type == BinaryOpIC::SMI) { | 1611 if (left_type == BinaryOpIC::SMI) { |
1614 __ JumpIfNotSmi(left, miss); | 1612 __ JumpIfNotSmi(left, miss); |
1615 } | 1613 } |
1616 if (right_type == BinaryOpIC::SMI) { | 1614 if (right_type == BinaryOpIC::SMI) { |
1617 __ JumpIfNotSmi(right, miss); | 1615 __ JumpIfNotSmi(right, miss); |
1618 } | 1616 } |
1619 | 1617 |
1620 Register heap_number_map = r9; | 1618 Register heap_number_map = scratch4; |
1621 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1619 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1622 | 1620 |
1623 switch (op) { | 1621 switch (op) { |
1624 case Token::ADD: | 1622 case Token::ADD: |
1625 case Token::SUB: | 1623 case Token::SUB: |
1626 case Token::MUL: | 1624 case Token::MUL: |
1627 case Token::DIV: | 1625 case Token::DIV: |
1628 case Token::MOD: { | 1626 case Token::MOD: { |
1629 // Allocate new heap number for result. | 1627 // Allocate new heap number for result. |
1630 Register result = r5; | |
1631 BinaryOpStub_GenerateHeapResultAllocation( | 1628 BinaryOpStub_GenerateHeapResultAllocation( |
1632 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1629 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
1633 | 1630 |
1634 // Load left and right operands into d0 and d1. | 1631 // Load left and right operands into d0 and d1. |
1635 if (smi_operands) { | 1632 if (smi_operands) { |
1636 __ SmiToDouble(d1, right); | 1633 __ SmiToDouble(d1, right); |
1637 __ SmiToDouble(d0, left); | 1634 __ SmiToDouble(d0, left); |
1638 } else { | 1635 } else { |
1639 // Load right operand into d1. | 1636 // Load right operand into d1. |
1640 if (right_type == BinaryOpIC::INT32) { | 1637 if (right_type == BinaryOpIC::INT32) { |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1743 default: | 1740 default: |
1744 UNREACHABLE(); | 1741 UNREACHABLE(); |
1745 } | 1742 } |
1746 | 1743 |
1747 // Check that the *signed* result fits in a smi. | 1744 // Check that the *signed* result fits in a smi. |
1748 __ TrySmiTag(r0, r2, &result_not_a_smi); | 1745 __ TrySmiTag(r0, r2, &result_not_a_smi); |
1749 __ Ret(); | 1746 __ Ret(); |
1750 | 1747 |
1751 // Allocate new heap number for result. | 1748 // Allocate new heap number for result. |
1752 __ bind(&result_not_a_smi); | 1749 __ bind(&result_not_a_smi); |
1753 Register result = r5; | |
1754 if (smi_operands) { | 1750 if (smi_operands) { |
1755 __ AllocateHeapNumber( | 1751 __ AllocateHeapNumber( |
1756 result, scratch1, scratch2, heap_number_map, gc_required); | 1752 result, scratch1, scratch2, heap_number_map, gc_required); |
1757 } else { | 1753 } else { |
1758 BinaryOpStub_GenerateHeapResultAllocation( | 1754 BinaryOpStub_GenerateHeapResultAllocation( |
1759 masm, result, heap_number_map, scratch1, scratch2, gc_required, | 1755 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
1760 mode); | 1756 mode); |
1761 } | 1757 } |
1762 | 1758 |
1763 // r2: Answer as signed int32. | 1759 // r2: Answer as signed int32. |
1764 // r5: Heap number to write answer into. | 1760 // result: Heap number to write answer into. |
1765 | 1761 |
1766 // Nothing can go wrong now, so move the heap number to r0, which is the | 1762 // Nothing can go wrong now, so move the heap number to r0, which is the |
1767 // result. | 1763 // result. |
1768 __ mov(r0, Operand(r5)); | 1764 __ mov(r0, Operand(result)); |
1769 | 1765 |
1770 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 1766 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
1771 // mentioned above SHR needs to always produce a positive result. | 1767 // mentioned above SHR needs to always produce a positive result. |
1772 __ vmov(s0, r2); | 1768 __ vmov(s0, r2); |
1773 if (op == Token::SHR) { | 1769 if (op == Token::SHR) { |
1774 __ vcvt_f64_u32(d0, s0); | 1770 __ vcvt_f64_u32(d0, s0); |
1775 } else { | 1771 } else { |
1776 __ vcvt_f64_s32(d0, s0); | 1772 __ vcvt_f64_s32(d0, s0); |
1777 } | 1773 } |
1778 __ sub(r3, r0, Operand(kHeapObjectTag)); | 1774 __ sub(r3, r0, Operand(kHeapObjectTag)); |
(...skipping 10 matching lines...) Expand all Loading... | |
1789 // Generate the smi code. If the operation on smis are successful this return is | 1785 // Generate the smi code. If the operation on smis are successful this return is |
1790 // generated. If the result is not a smi and heap number allocation is not | 1786 // generated. If the result is not a smi and heap number allocation is not |
1791 // requested the code falls through. If number allocation is requested but a | 1787 // requested the code falls through. If number allocation is requested but a |
1792 // heap number cannot be allocated the code jumps to the label gc_required. | 1788 // heap number cannot be allocated the code jumps to the label gc_required. |
1793 void BinaryOpStub_GenerateSmiCode( | 1789 void BinaryOpStub_GenerateSmiCode( |
1794 MacroAssembler* masm, | 1790 MacroAssembler* masm, |
1795 Label* use_runtime, | 1791 Label* use_runtime, |
1796 Label* gc_required, | 1792 Label* gc_required, |
1797 Token::Value op, | 1793 Token::Value op, |
1798 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 1794 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
1799 OverwriteMode mode) { | 1795 OverwriteMode mode, |
1796 Register scratch1, | |
1797 Register scratch2, | |
1798 Register scratch3, | |
1799 Register scratch4) { | |
1800 Label not_smis; | 1800 Label not_smis; |
1801 | 1801 |
1802 Register left = r1; | 1802 Register left = r1; |
1803 Register right = r0; | 1803 Register right = r0; |
1804 Register scratch1 = r7; | |
1805 | 1804 |
Rodolph Perfetta
2013/07/30 15:19:08
assert scratchx do not alias with left and right.
rmcilroy
2013/07/30 17:12:37
Done.
| |
1806 // Perform combined smi check on both operands. | 1805 // Perform combined smi check on both operands. |
1807 __ orr(scratch1, left, Operand(right)); | 1806 __ orr(scratch1, left, Operand(right)); |
1808 __ JumpIfNotSmi(scratch1, ¬_smis); | 1807 __ JumpIfNotSmi(scratch1, ¬_smis); |
1809 | 1808 |
1810 // If the smi-smi operation results in a smi return is generated. | 1809 // If the smi-smi operation results in a smi return is generated. |
1811 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | 1810 BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2); |
1812 | 1811 |
1813 // If heap number results are possible generate the result in an allocated | 1812 // If heap number results are possible generate the result in an allocated |
1814 // heap number. | 1813 // heap number. |
1815 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | 1814 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
1816 BinaryOpStub_GenerateFPOperation( | 1815 BinaryOpStub_GenerateFPOperation( |
1817 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | 1816 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
1818 use_runtime, gc_required, ¬_smis, op, mode); | 1817 use_runtime, gc_required, ¬_smis, op, mode, scratch2, scratch3, |
1818 scratch1, scratch4); | |
1819 } | 1819 } |
1820 __ bind(¬_smis); | 1820 __ bind(¬_smis); |
1821 } | 1821 } |
1822 | 1822 |
1823 | 1823 |
1824 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1824 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
1825 Label right_arg_changed, call_runtime; | 1825 Label right_arg_changed, call_runtime; |
1826 | 1826 |
1827 if (op_ == Token::MOD && encoded_right_arg_.has_value) { | 1827 if (op_ == Token::MOD && encoded_right_arg_.has_value) { |
1828 // It is guaranteed that the value will fit into a Smi, because if it | 1828 // It is guaranteed that the value will fit into a Smi, because if it |
1829 // didn't, we wouldn't be here, see BinaryOp_Patch. | 1829 // didn't, we wouldn't be here, see BinaryOp_Patch. |
1830 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); | 1830 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); |
1831 __ b(ne, &right_arg_changed); | 1831 __ b(ne, &right_arg_changed); |
1832 } | 1832 } |
1833 | 1833 |
1834 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 1834 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
1835 result_type_ == BinaryOpIC::SMI) { | 1835 result_type_ == BinaryOpIC::SMI) { |
1836 // Only allow smi results. | 1836 // Only allow smi results. |
1837 BinaryOpStub_GenerateSmiCode( | 1837 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_, |
1838 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); | 1838 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9); |
1839 } else { | 1839 } else { |
1840 // Allow heap number result and don't make a transition if a heap number | 1840 // Allow heap number result and don't make a transition if a heap number |
1841 // cannot be allocated. | 1841 // cannot be allocated. |
1842 BinaryOpStub_GenerateSmiCode( | 1842 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_, |
1843 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, | 1843 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9); |
1844 mode_); | |
1845 } | 1844 } |
1846 | 1845 |
1847 // Code falls through if the result is not returned as either a smi or heap | 1846 // Code falls through if the result is not returned as either a smi or heap |
1848 // number. | 1847 // number. |
1849 __ bind(&right_arg_changed); | 1848 __ bind(&right_arg_changed); |
1850 GenerateTypeTransition(masm); | 1849 GenerateTypeTransition(masm); |
1851 | 1850 |
1852 __ bind(&call_runtime); | 1851 __ bind(&call_runtime); |
1853 { | 1852 { |
1854 FrameScope scope(masm, StackFrame::INTERNAL); | 1853 FrameScope scope(masm, StackFrame::INTERNAL); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1888 __ bind(&call_runtime); | 1887 __ bind(&call_runtime); |
1889 GenerateTypeTransition(masm); | 1888 GenerateTypeTransition(masm); |
1890 } | 1889 } |
1891 | 1890 |
1892 | 1891 |
1893 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1892 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
1894 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | 1893 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
1895 | 1894 |
1896 Register left = r1; | 1895 Register left = r1; |
1897 Register right = r0; | 1896 Register right = r0; |
1898 Register scratch1 = r7; | 1897 Register scratch1 = r4; |
1899 Register scratch2 = r9; | 1898 Register scratch2 = r9; |
1899 Register scratch3 = r5; | |
1900 LowDwVfpRegister double_scratch = d0; | 1900 LowDwVfpRegister double_scratch = d0; |
1901 | 1901 |
1902 Register heap_number_result = no_reg; | 1902 Register heap_number_result = no_reg; |
1903 Register heap_number_map = r6; | 1903 Register heap_number_map = r6; |
1904 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1904 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1905 | 1905 |
1906 Label call_runtime; | 1906 Label call_runtime; |
1907 // Labels for type transition, used for wrong input or output types. | 1907 // Labels for type transition, used for wrong input or output types. |
1908 // Both label are currently actually bound to the same position. We use two | 1908 // Both label are currently actually bound to the same position. We use two |
1909 // different label to differentiate the cause leading to type transition. | 1909 // different label to differentiate the cause leading to type transition. |
1910 Label transition; | 1910 Label transition; |
1911 | 1911 |
1912 // Smi-smi fast case. | 1912 // Smi-smi fast case. |
1913 Label skip; | 1913 Label skip; |
1914 __ orr(scratch1, left, right); | 1914 __ orr(scratch1, left, right); |
1915 __ JumpIfNotSmi(scratch1, &skip); | 1915 __ JumpIfNotSmi(scratch1, &skip); |
1916 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); | 1916 BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3); |
1917 // Fall through if the result is not a smi. | 1917 // Fall through if the result is not a smi. |
1918 __ bind(&skip); | 1918 __ bind(&skip); |
1919 | 1919 |
1920 switch (op_) { | 1920 switch (op_) { |
1921 case Token::ADD: | 1921 case Token::ADD: |
1922 case Token::SUB: | 1922 case Token::SUB: |
1923 case Token::MUL: | 1923 case Token::MUL: |
1924 case Token::DIV: | 1924 case Token::DIV: |
1925 case Token::MOD: { | 1925 case Token::MOD: { |
1926 // It could be that only SMIs have been seen at either the left | 1926 // It could be that only SMIs have been seen at either the left |
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2000 // A DIV operation expecting an integer result falls through | 2000 // A DIV operation expecting an integer result falls through |
2001 // to type transition. | 2001 // to type transition. |
2002 | 2002 |
2003 } else { | 2003 } else { |
2004 if (encoded_right_arg_.has_value) { | 2004 if (encoded_right_arg_.has_value) { |
2005 __ Vmov(d8, fixed_right_arg_value(), scratch1); | 2005 __ Vmov(d8, fixed_right_arg_value(), scratch1); |
2006 __ VFPCompareAndSetFlags(d1, d8); | 2006 __ VFPCompareAndSetFlags(d1, d8); |
2007 __ b(ne, &transition); | 2007 __ b(ne, &transition); |
2008 } | 2008 } |
2009 | 2009 |
2010 // We preserved r0 and r1 to be able to call runtime. | |
2011 // Save the left value on the stack. | |
2012 __ Push(r5, r4); | |
2013 | |
2014 Label pop_and_call_runtime; | |
2015 | |
2016 // Allocate a heap number to store the result. | 2010 // Allocate a heap number to store the result. |
2017 heap_number_result = r5; | 2011 heap_number_result = r5; |
2018 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2012 BinaryOpStub_GenerateHeapResultAllocation(masm, |
2019 heap_number_result, | 2013 heap_number_result, |
2020 heap_number_map, | 2014 heap_number_map, |
2021 scratch1, | 2015 scratch1, |
2022 scratch2, | 2016 scratch2, |
2023 &pop_and_call_runtime, | 2017 &call_runtime, |
2024 mode_); | 2018 mode_); |
2025 | 2019 |
2026 // Load the left value from the value saved on the stack. | |
2027 __ Pop(r1, r0); | |
2028 | |
2029 // Call the C function to handle the double operation. | 2020 // Call the C function to handle the double operation. |
2030 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); | 2021 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
2031 if (FLAG_debug_code) { | 2022 if (FLAG_debug_code) { |
2032 __ stop("Unreachable code."); | 2023 __ stop("Unreachable code."); |
2033 } | 2024 } |
2034 | 2025 |
2035 __ bind(&pop_and_call_runtime); | |
2036 __ Drop(2); | |
2037 __ b(&call_runtime); | 2026 __ b(&call_runtime); |
2038 } | 2027 } |
2039 | 2028 |
2040 break; | 2029 break; |
2041 } | 2030 } |
2042 | 2031 |
2043 case Token::BIT_OR: | 2032 case Token::BIT_OR: |
2044 case Token::BIT_XOR: | 2033 case Token::BIT_XOR: |
2045 case Token::BIT_AND: | 2034 case Token::BIT_AND: |
2046 case Token::SAR: | 2035 case Token::SAR: |
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2177 __ bind(&done); | 2166 __ bind(&done); |
2178 | 2167 |
2179 GenerateNumberStub(masm); | 2168 GenerateNumberStub(masm); |
2180 } | 2169 } |
2181 | 2170 |
2182 | 2171 |
2183 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { | 2172 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
2184 Label call_runtime, transition; | 2173 Label call_runtime, transition; |
2185 BinaryOpStub_GenerateFPOperation( | 2174 BinaryOpStub_GenerateFPOperation( |
2186 masm, left_type_, right_type_, false, | 2175 masm, left_type_, right_type_, false, |
2187 &transition, &call_runtime, &transition, op_, mode_); | 2176 &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9); |
2188 | 2177 |
2189 __ bind(&transition); | 2178 __ bind(&transition); |
2190 GenerateTypeTransition(masm); | 2179 GenerateTypeTransition(masm); |
2191 | 2180 |
2192 __ bind(&call_runtime); | 2181 __ bind(&call_runtime); |
2193 { | 2182 { |
2194 FrameScope scope(masm, StackFrame::INTERNAL); | 2183 FrameScope scope(masm, StackFrame::INTERNAL); |
2195 GenerateRegisterArgsPush(masm); | 2184 GenerateRegisterArgsPush(masm); |
2196 GenerateCallRuntime(masm); | 2185 GenerateCallRuntime(masm); |
2197 } | 2186 } |
2198 __ Ret(); | 2187 __ Ret(); |
2199 } | 2188 } |
2200 | 2189 |
2201 | 2190 |
2202 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2191 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2203 Label call_runtime, call_string_add_or_runtime, transition; | 2192 Label call_runtime, call_string_add_or_runtime, transition; |
2204 | 2193 |
2205 BinaryOpStub_GenerateSmiCode( | 2194 BinaryOpStub_GenerateSmiCode( |
2206 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); | 2195 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_, |
2196 r5, r6, r4, r9); | |
2207 | 2197 |
2208 BinaryOpStub_GenerateFPOperation( | 2198 BinaryOpStub_GenerateFPOperation( |
2209 masm, left_type_, right_type_, false, | 2199 masm, left_type_, right_type_, false, |
2210 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); | 2200 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6, |
2201 r4, r5, r9); | |
2211 | 2202 |
2212 __ bind(&transition); | 2203 __ bind(&transition); |
2213 GenerateTypeTransition(masm); | 2204 GenerateTypeTransition(masm); |
2214 | 2205 |
2215 __ bind(&call_string_add_or_runtime); | 2206 __ bind(&call_string_add_or_runtime); |
2216 if (op_ == Token::ADD) { | 2207 if (op_ == Token::ADD) { |
2217 GenerateAddStrings(masm); | 2208 GenerateAddStrings(masm); |
2218 } | 2209 } |
2219 | 2210 |
2220 __ bind(&call_runtime); | 2211 __ bind(&call_runtime); |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2302 // Untagged case: double input in d2, double result goes | 2293 // Untagged case: double input in d2, double result goes |
2303 // into d2. | 2294 // into d2. |
2304 // Tagged case: tagged input on top of stack and in r0, | 2295 // Tagged case: tagged input on top of stack and in r0, |
2305 // tagged result (heap number) goes into r0. | 2296 // tagged result (heap number) goes into r0. |
2306 | 2297 |
2307 Label input_not_smi; | 2298 Label input_not_smi; |
2308 Label loaded; | 2299 Label loaded; |
2309 Label calculate; | 2300 Label calculate; |
2310 Label invalid_cache; | 2301 Label invalid_cache; |
2311 const Register scratch0 = r9; | 2302 const Register scratch0 = r9; |
2312 const Register scratch1 = r7; | 2303 const Register scratch1 = r4; |
2313 const Register cache_entry = r0; | 2304 const Register cache_entry = r0; |
2314 const bool tagged = (argument_type_ == TAGGED); | 2305 const bool tagged = (argument_type_ == TAGGED); |
2315 | 2306 |
2316 if (tagged) { | 2307 if (tagged) { |
2317 // Argument is a number and is on stack and in r0. | 2308 // Argument is a number and is on stack and in r0. |
2318 // Load argument and check if it is a smi. | 2309 // Load argument and check if it is a smi. |
2319 __ JumpIfNotSmi(r0, &input_not_smi); | 2310 __ JumpIfNotSmi(r0, &input_not_smi); |
2320 | 2311 |
2321 // Input is a smi. Convert to double and load the low and high words | 2312 // Input is a smi. Convert to double and load the low and high words |
2322 // of the double into r2, r3. | 2313 // of the double into r2, r3. |
(...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2534 const Register base = r1; | 2525 const Register base = r1; |
2535 const Register exponent = r2; | 2526 const Register exponent = r2; |
2536 const Register heapnumbermap = r5; | 2527 const Register heapnumbermap = r5; |
2537 const Register heapnumber = r0; | 2528 const Register heapnumber = r0; |
2538 const DwVfpRegister double_base = d1; | 2529 const DwVfpRegister double_base = d1; |
2539 const DwVfpRegister double_exponent = d2; | 2530 const DwVfpRegister double_exponent = d2; |
2540 const DwVfpRegister double_result = d3; | 2531 const DwVfpRegister double_result = d3; |
2541 const DwVfpRegister double_scratch = d0; | 2532 const DwVfpRegister double_scratch = d0; |
2542 const SwVfpRegister single_scratch = s0; | 2533 const SwVfpRegister single_scratch = s0; |
2543 const Register scratch = r9; | 2534 const Register scratch = r9; |
2544 const Register scratch2 = r7; | 2535 const Register scratch2 = r4; |
2545 | 2536 |
2546 Label call_runtime, done, int_exponent; | 2537 Label call_runtime, done, int_exponent; |
2547 if (exponent_type_ == ON_STACK) { | 2538 if (exponent_type_ == ON_STACK) { |
2548 Label base_is_smi, unpack_exponent; | 2539 Label base_is_smi, unpack_exponent; |
2549 // The exponent and base are supplied as arguments on the stack. | 2540 // The exponent and base are supplied as arguments on the stack. |
2550 // This can only happen if the stub is called from non-optimized code. | 2541 // This can only happen if the stub is called from non-optimized code. |
2551 // Load input parameters from stack to double registers. | 2542 // Load input parameters from stack to double registers. |
2552 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); | 2543 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); |
2553 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); | 2544 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); |
2554 | 2545 |
(...skipping 488 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3043 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | 3034 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
3044 __ ldr(r4, MemOperand(sp, offset_to_argv)); | 3035 __ ldr(r4, MemOperand(sp, offset_to_argv)); |
3045 | 3036 |
3046 // Push a frame with special values setup to mark it as an entry frame. | 3037 // Push a frame with special values setup to mark it as an entry frame. |
3047 // r0: code entry | 3038 // r0: code entry |
3048 // r1: function | 3039 // r1: function |
3049 // r2: receiver | 3040 // r2: receiver |
3050 // r3: argc | 3041 // r3: argc |
3051 // r4: argv | 3042 // r4: argv |
3052 Isolate* isolate = masm->isolate(); | 3043 Isolate* isolate = masm->isolate(); |
3053 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | |
3054 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 3044 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
3055 __ mov(r7, Operand(Smi::FromInt(marker))); | 3045 __ mov(r8, Operand(Smi::FromInt(marker))); |
3056 __ mov(r6, Operand(Smi::FromInt(marker))); | 3046 __ mov(r6, Operand(Smi::FromInt(marker))); |
3057 __ mov(r5, | 3047 __ mov(r5, |
3058 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | 3048 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
3059 __ ldr(r5, MemOperand(r5)); | 3049 __ ldr(r5, MemOperand(r5)); |
3060 __ Push(r8, r7, r6, r5); | 3050 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
3051 __ Push(ip, r8, r6, r5); | |
3061 | 3052 |
3062 // Set up frame pointer for the frame to be pushed. | 3053 // Set up frame pointer for the frame to be pushed. |
3063 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 3054 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
3064 | 3055 |
3065 // If this is the outermost JS call, set js_entry_sp value. | 3056 // If this is the outermost JS call, set js_entry_sp value. |
3066 Label non_outermost_js; | 3057 Label non_outermost_js; |
3067 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | 3058 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
3068 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 3059 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
3069 __ ldr(r6, MemOperand(r5)); | 3060 __ ldr(r6, MemOperand(r5)); |
3070 __ cmp(r6, Operand::Zero()); | 3061 __ cmp(r6, Operand::Zero()); |
(...skipping 25 matching lines...) Expand all Loading... | |
3096 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 3087 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
3097 isolate))); | 3088 isolate))); |
3098 } | 3089 } |
3099 __ str(r0, MemOperand(ip)); | 3090 __ str(r0, MemOperand(ip)); |
3100 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | 3091 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
3101 __ b(&exit); | 3092 __ b(&exit); |
3102 | 3093 |
3103 // Invoke: Link this frame into the handler chain. There's only one | 3094 // Invoke: Link this frame into the handler chain. There's only one |
3104 // handler block in this code object, so its index is 0. | 3095 // handler block in this code object, so its index is 0. |
3105 __ bind(&invoke); | 3096 __ bind(&invoke); |
3106 // Must preserve r0-r4, r5-r7 are available. | 3097 // Must preserve r0-r4, r5-r6 are available. |
3107 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 3098 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
3108 // If an exception not caught by another handler occurs, this handler | 3099 // If an exception not caught by another handler occurs, this handler |
3109 // returns control to the code after the bl(&invoke) above, which | 3100 // returns control to the code after the bl(&invoke) above, which |
3110 // restores all kCalleeSaved registers (including cp and fp) to their | 3101 // restores all kCalleeSaved registers (including cp and fp) to their |
3111 // saved values before returning a failure to C. | 3102 // saved values before returning a failure to C. |
3112 | 3103 |
3113 // Clear any pending exceptions. | 3104 // Clear any pending exceptions. |
3114 __ mov(r5, Operand(isolate->factory()->the_hole_value())); | 3105 __ mov(r5, Operand(isolate->factory()->the_hole_value())); |
3115 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 3106 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
3116 isolate))); | 3107 isolate))); |
(...skipping 587 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3704 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 3695 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
3705 // The mapped parameter thus need to get indices | 3696 // The mapped parameter thus need to get indices |
3706 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 3697 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
3707 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 3698 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
3708 // We loop from right to left. | 3699 // We loop from right to left. |
3709 Label parameters_loop, parameters_test; | 3700 Label parameters_loop, parameters_test; |
3710 __ mov(r6, r1); | 3701 __ mov(r6, r1); |
3711 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); | 3702 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); |
3712 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 3703 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
3713 __ sub(r9, r9, Operand(r1)); | 3704 __ sub(r9, r9, Operand(r1)); |
3714 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); | 3705 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); |
3715 __ add(r3, r4, Operand(r6, LSL, 1)); | 3706 __ add(r3, r4, Operand(r6, LSL, 1)); |
3716 __ add(r3, r3, Operand(kParameterMapHeaderSize)); | 3707 __ add(r3, r3, Operand(kParameterMapHeaderSize)); |
3717 | 3708 |
3718 // r6 = loop variable (tagged) | 3709 // r6 = loop variable (tagged) |
3719 // r1 = mapping index (tagged) | 3710 // r1 = mapping index (tagged) |
3720 // r3 = address of backing store (tagged) | 3711 // r3 = address of backing store (tagged) |
3721 // r4 = address of parameter map (tagged) | 3712 // r4 = address of parameter map (tagged) |
3722 // r5 = temporary scratch (a.o., for address calculation) | 3713 // r0 = temporary scratch (a.o., for address calculation) |
3723 // r7 = the hole value | 3714 // r5 = the hole value |
3724 __ jmp(¶meters_test); | 3715 __ jmp(¶meters_test); |
3725 | 3716 |
3726 __ bind(¶meters_loop); | 3717 __ bind(¶meters_loop); |
3727 __ sub(r6, r6, Operand(Smi::FromInt(1))); | 3718 __ sub(r6, r6, Operand(Smi::FromInt(1))); |
3728 __ mov(r5, Operand(r6, LSL, 1)); | 3719 __ mov(r0, Operand(r6, LSL, 1)); |
3729 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 3720 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
3730 __ str(r9, MemOperand(r4, r5)); | 3721 __ str(r9, MemOperand(r4, r0)); |
3731 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 3722 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
3732 __ str(r7, MemOperand(r3, r5)); | 3723 __ str(r5, MemOperand(r3, r0)); |
3733 __ add(r9, r9, Operand(Smi::FromInt(1))); | 3724 __ add(r9, r9, Operand(Smi::FromInt(1))); |
3734 __ bind(¶meters_test); | 3725 __ bind(¶meters_test); |
3735 __ cmp(r6, Operand(Smi::FromInt(0))); | 3726 __ cmp(r6, Operand(Smi::FromInt(0))); |
3736 __ b(ne, ¶meters_loop); | 3727 __ b(ne, ¶meters_loop); |
3737 | 3728 |
3729 __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize)); | |
3730 | |
3738 __ bind(&skip_parameter_map); | 3731 __ bind(&skip_parameter_map); |
3732 // r0 = address of new object (tagged) | |
3739 // r2 = argument count (tagged) | 3733 // r2 = argument count (tagged) |
3740 // r3 = address of backing store (tagged) | 3734 // r3 = address of backing store (tagged) |
3741 // r5 = scratch | 3735 // r5 = scratch |
3742 // Copy arguments header and remaining slots (if there are any). | 3736 // Copy arguments header and remaining slots (if there are any). |
3743 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); | 3737 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); |
3744 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); | 3738 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); |
3745 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 3739 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
3746 | 3740 |
3747 Label arguments_loop, arguments_test; | 3741 Label arguments_loop, arguments_test; |
3748 __ mov(r9, r1); | 3742 __ mov(r9, r1); |
(...skipping 10 matching lines...) Expand all Loading... | |
3759 | 3753 |
3760 __ bind(&arguments_test); | 3754 __ bind(&arguments_test); |
3761 __ cmp(r9, Operand(r2)); | 3755 __ cmp(r9, Operand(r2)); |
3762 __ b(lt, &arguments_loop); | 3756 __ b(lt, &arguments_loop); |
3763 | 3757 |
3764 // Return and remove the on-stack parameters. | 3758 // Return and remove the on-stack parameters. |
3765 __ add(sp, sp, Operand(3 * kPointerSize)); | 3759 __ add(sp, sp, Operand(3 * kPointerSize)); |
3766 __ Ret(); | 3760 __ Ret(); |
3767 | 3761 |
3768 // Do the runtime call to allocate the arguments object. | 3762 // Do the runtime call to allocate the arguments object. |
3763 // r0 = address of new object (tagged) | |
3769 // r2 = argument count (tagged) | 3764 // r2 = argument count (tagged) |
3770 __ bind(&runtime); | 3765 __ bind(&runtime); |
3771 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. | 3766 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
3772 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 3767 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
3773 } | 3768 } |
3774 | 3769 |
3775 | 3770 |
3776 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 3771 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
3777 // sp[0] : number of parameters | 3772 // sp[0] : number of parameters |
3778 // sp[4] : receiver displacement | 3773 // sp[4] : receiver displacement |
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3887 const int kJSRegExpOffset = 3 * kPointerSize; | 3882 const int kJSRegExpOffset = 3 * kPointerSize; |
3888 | 3883 |
3889 Label runtime; | 3884 Label runtime; |
3890 // Allocation of registers for this function. These are in callee save | 3885 // Allocation of registers for this function. These are in callee save |
3891 // registers and will be preserved by the call to the native RegExp code, as | 3886 // registers and will be preserved by the call to the native RegExp code, as |
3892 // this code is called using the normal C calling convention. When calling | 3887 // this code is called using the normal C calling convention. When calling |
3893 // directly from generated code the native RegExp code will not do a GC and | 3888 // directly from generated code the native RegExp code will not do a GC and |
3894 // therefore the content of these registers are safe to use after the call. | 3889 // therefore the content of these registers are safe to use after the call. |
3895 Register subject = r4; | 3890 Register subject = r4; |
3896 Register regexp_data = r5; | 3891 Register regexp_data = r5; |
3897 Register last_match_info_elements = r6; | 3892 Register last_match_info_elements = no_reg; // will be r6; |
3898 | 3893 |
3899 // Ensure that a RegExp stack is allocated. | 3894 // Ensure that a RegExp stack is allocated. |
3900 Isolate* isolate = masm->isolate(); | 3895 Isolate* isolate = masm->isolate(); |
3901 ExternalReference address_of_regexp_stack_memory_address = | 3896 ExternalReference address_of_regexp_stack_memory_address = |
3902 ExternalReference::address_of_regexp_stack_memory_address(isolate); | 3897 ExternalReference::address_of_regexp_stack_memory_address(isolate); |
3903 ExternalReference address_of_regexp_stack_memory_size = | 3898 ExternalReference address_of_regexp_stack_memory_size = |
3904 ExternalReference::address_of_regexp_stack_memory_size(isolate); | 3899 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
3905 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 3900 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); |
3906 __ ldr(r0, MemOperand(r0, 0)); | 3901 __ ldr(r0, MemOperand(r0, 0)); |
3907 __ cmp(r0, Operand::Zero()); | 3902 __ cmp(r0, Operand::Zero()); |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4020 __ JumpIfNotSmi(r1, &runtime); | 4015 __ JumpIfNotSmi(r1, &runtime); |
4021 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); | 4016 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); |
4022 __ cmp(r3, Operand(r1)); | 4017 __ cmp(r3, Operand(r1)); |
4023 __ b(ls, &runtime); | 4018 __ b(ls, &runtime); |
4024 __ SmiUntag(r1); | 4019 __ SmiUntag(r1); |
4025 | 4020 |
4026 STATIC_ASSERT(4 == kOneByteStringTag); | 4021 STATIC_ASSERT(4 == kOneByteStringTag); |
4027 STATIC_ASSERT(kTwoByteStringTag == 0); | 4022 STATIC_ASSERT(kTwoByteStringTag == 0); |
4028 __ and_(r0, r0, Operand(kStringEncodingMask)); | 4023 __ and_(r0, r0, Operand(kStringEncodingMask)); |
4029 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 4024 __ mov(r3, Operand(r0, ASR, 2), SetCC); |
4030 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 4025 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); |
4031 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 4026 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); |
4032 | 4027 |
4033 // (E) Carry on. String handling is done. | 4028 // (E) Carry on. String handling is done. |
4034 // r7: irregexp code | 4029 // r6: irregexp code |
4035 // Check that the irregexp code has been generated for the actual string | 4030 // Check that the irregexp code has been generated for the actual string |
4036 // encoding. If it has, the field contains a code object otherwise it contains | 4031 // encoding. If it has, the field contains a code object otherwise it contains |
4037 // a smi (code flushing support). | 4032 // a smi (code flushing support). |
4038 __ JumpIfSmi(r7, &runtime); | 4033 __ JumpIfSmi(r6, &runtime); |
4039 | 4034 |
4040 // r1: previous index | 4035 // r1: previous index |
4041 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 4036 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); |
4042 // r7: code | 4037 // r6: code |
4043 // subject: Subject string | 4038 // subject: Subject string |
4044 // regexp_data: RegExp data (FixedArray) | 4039 // regexp_data: RegExp data (FixedArray) |
4045 // All checks done. Now push arguments for native regexp code. | 4040 // All checks done. Now push arguments for native regexp code. |
4046 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); | 4041 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); |
4047 | 4042 |
4048 // Isolates: note we add an additional parameter here (isolate pointer). | 4043 // Isolates: note we add an additional parameter here (isolate pointer). |
4049 const int kRegExpExecuteArguments = 9; | 4044 const int kRegExpExecuteArguments = 9; |
4050 const int kParameterRegisters = 4; | 4045 const int kParameterRegisters = 4; |
4051 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 4046 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
4052 | 4047 |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4099 __ SmiUntag(r8); | 4094 __ SmiUntag(r8); |
4100 __ add(r3, r9, Operand(r8, LSL, r3)); | 4095 __ add(r3, r9, Operand(r8, LSL, r3)); |
4101 | 4096 |
4102 // Argument 2 (r1): Previous index. | 4097 // Argument 2 (r1): Previous index. |
4103 // Already there | 4098 // Already there |
4104 | 4099 |
4105 // Argument 1 (r0): Subject string. | 4100 // Argument 1 (r0): Subject string. |
4106 __ mov(r0, subject); | 4101 __ mov(r0, subject); |
4107 | 4102 |
4108 // Locate the code entry and call it. | 4103 // Locate the code entry and call it. |
4109 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4104 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); |
4110 DirectCEntryStub stub; | 4105 DirectCEntryStub stub; |
4111 stub.GenerateCall(masm, r7); | 4106 stub.GenerateCall(masm, r6); |
4112 | 4107 |
4113 __ LeaveExitFrame(false, no_reg); | 4108 __ LeaveExitFrame(false, no_reg); |
4114 | 4109 |
4110 last_match_info_elements = r6; | |
4111 | |
4115 // r0: result | 4112 // r0: result |
4116 // subject: subject string (callee saved) | 4113 // subject: subject string (callee saved) |
4117 // regexp_data: RegExp data (callee saved) | 4114 // regexp_data: RegExp data (callee saved) |
4118 // last_match_info_elements: Last match info elements (callee saved) | 4115 // last_match_info_elements: Last match info elements (callee saved) |
4119 // Check the result. | 4116 // Check the result. |
4120 Label success; | 4117 Label success; |
4121 __ cmp(r0, Operand(1)); | 4118 __ cmp(r0, Operand(1)); |
4122 // We expect exactly one result since we force the called regexp to behave | 4119 // We expect exactly one result since we force the called regexp to behave |
4123 // as non-global. | 4120 // as non-global. |
4124 __ b(eq, &success); | 4121 __ b(eq, &success); |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4193 __ str(r2, FieldMemOperand(last_match_info_elements, | 4190 __ str(r2, FieldMemOperand(last_match_info_elements, |
4194 RegExpImpl::kLastCaptureCountOffset)); | 4191 RegExpImpl::kLastCaptureCountOffset)); |
4195 // Store last subject and last input. | 4192 // Store last subject and last input. |
4196 __ str(subject, | 4193 __ str(subject, |
4197 FieldMemOperand(last_match_info_elements, | 4194 FieldMemOperand(last_match_info_elements, |
4198 RegExpImpl::kLastSubjectOffset)); | 4195 RegExpImpl::kLastSubjectOffset)); |
4199 __ mov(r2, subject); | 4196 __ mov(r2, subject); |
4200 __ RecordWriteField(last_match_info_elements, | 4197 __ RecordWriteField(last_match_info_elements, |
4201 RegExpImpl::kLastSubjectOffset, | 4198 RegExpImpl::kLastSubjectOffset, |
4202 subject, | 4199 subject, |
4203 r7, | 4200 r3, |
4204 kLRHasNotBeenSaved, | 4201 kLRHasNotBeenSaved, |
4205 kDontSaveFPRegs); | 4202 kDontSaveFPRegs); |
4206 __ mov(subject, r2); | 4203 __ mov(subject, r2); |
4207 __ str(subject, | 4204 __ str(subject, |
4208 FieldMemOperand(last_match_info_elements, | 4205 FieldMemOperand(last_match_info_elements, |
4209 RegExpImpl::kLastInputOffset)); | 4206 RegExpImpl::kLastInputOffset)); |
4210 __ RecordWriteField(last_match_info_elements, | 4207 __ RecordWriteField(last_match_info_elements, |
4211 RegExpImpl::kLastInputOffset, | 4208 RegExpImpl::kLastInputOffset, |
4212 subject, | 4209 subject, |
4213 r7, | 4210 r3, |
4214 kLRHasNotBeenSaved, | 4211 kLRHasNotBeenSaved, |
4215 kDontSaveFPRegs); | 4212 kDontSaveFPRegs); |
4216 | 4213 |
4217 // Get the static offsets vector filled by the native regexp code. | 4214 // Get the static offsets vector filled by the native regexp code. |
4218 ExternalReference address_of_static_offsets_vector = | 4215 ExternalReference address_of_static_offsets_vector = |
4219 ExternalReference::address_of_static_offsets_vector(isolate); | 4216 ExternalReference::address_of_static_offsets_vector(isolate); |
4220 __ mov(r2, Operand(address_of_static_offsets_vector)); | 4217 __ mov(r2, Operand(address_of_static_offsets_vector)); |
4221 | 4218 |
4222 // r1: number of capture registers | 4219 // r1: number of capture registers |
4223 // r2: offsets vector | 4220 // r2: offsets vector |
(...skipping 545 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4769 | 4766 |
4770 | 4767 |
4771 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, | 4768 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, |
4772 Register dest, | 4769 Register dest, |
4773 Register src, | 4770 Register src, |
4774 Register count, | 4771 Register count, |
4775 Register scratch1, | 4772 Register scratch1, |
4776 Register scratch2, | 4773 Register scratch2, |
4777 Register scratch3, | 4774 Register scratch3, |
4778 Register scratch4, | 4775 Register scratch4, |
4779 Register scratch5, | |
4780 int flags) { | 4776 int flags) { |
4781 bool ascii = (flags & COPY_ASCII) != 0; | 4777 bool ascii = (flags & COPY_ASCII) != 0; |
4782 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; | 4778 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; |
4783 | 4779 |
4784 if (dest_always_aligned && FLAG_debug_code) { | 4780 if (dest_always_aligned && FLAG_debug_code) { |
4785 // Check that destination is actually word aligned if the flag says | 4781 // Check that destination is actually word aligned if the flag says |
4786 // that it is. | 4782 // that it is. |
4787 __ tst(dest, Operand(kPointerAlignmentMask)); | 4783 __ tst(dest, Operand(kPointerAlignmentMask)); |
4788 __ Check(eq, "Destination of copy not aligned."); | 4784 __ Check(eq, "Destination of copy not aligned."); |
4789 } | 4785 } |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
4844 __ and_(src, src, Operand(~3)); // Round down to load previous word. | 4840 __ and_(src, src, Operand(~3)); // Round down to load previous word. |
4845 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | 4841 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); |
4846 // Store the "shift" most significant bits of scratch in the least | 4842 // Store the "shift" most significant bits of scratch in the least |
4847 // signficant bits (i.e., shift down by (32-shift)). | 4843 // signficant bits (i.e., shift down by (32-shift)). |
4848 __ rsb(scratch2, left_shift, Operand(32)); | 4844 __ rsb(scratch2, left_shift, Operand(32)); |
4849 Register right_shift = scratch2; | 4845 Register right_shift = scratch2; |
4850 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); | 4846 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); |
4851 | 4847 |
4852 __ bind(&loop); | 4848 __ bind(&loop); |
4853 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); | 4849 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); |
4854 __ sub(scratch5, limit, Operand(dest)); | |
4855 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); | 4850 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); |
4856 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | 4851 __ str(scratch1, MemOperand(dest, 4, PostIndex)); |
4857 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); | 4852 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); |
4858 // Loop if four or more bytes left to copy. | 4853 // Loop if four or more bytes left to copy. |
4859 // Compare to eight, because we did the subtract before increasing dst. | 4854 __ sub(scratch3, limit, Operand(dest)); |
4860 __ sub(scratch5, scratch5, Operand(8), SetCC); | 4855 __ sub(scratch3, scratch3, Operand(4), SetCC); |
4861 __ b(ge, &loop); | 4856 __ b(ge, &loop); |
4862 } | 4857 } |
4863 // There is now between zero and three bytes left to copy (negative that | 4858 // There is now between zero and three bytes left to copy (negative that |
4864 // number is in scratch5), and between one and three bytes already read into | 4859 // number is in scratch3), and between one and three bytes already read into |
4865 // scratch1 (eight times that number in scratch4). We may have read past | 4860 // scratch1 (eight times that number in scratch4). We may have read past |
4866 // the end of the string, but because objects are aligned, we have not read | 4861 // the end of the string, but because objects are aligned, we have not read |
4867 // past the end of the object. | 4862 // past the end of the object. |
4868 // Find the minimum of remaining characters to move and preloaded characters | 4863 // Find the minimum of remaining characters to move and preloaded characters |
4869 // and write those as bytes. | 4864 // and write those as bytes. |
4870 __ add(scratch5, scratch5, Operand(4), SetCC); | 4865 __ add(scratch3, scratch3, Operand(4), SetCC); |
4871 __ b(eq, &done); | 4866 __ b(eq, &done); |
4872 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); | 4867 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne); |
4873 // Move minimum of bytes read and bytes left to copy to scratch4. | 4868 // Move minimum of bytes read and bytes left to copy to scratch4. |
4874 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); | 4869 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt); |
4875 // Between one and three (value in scratch5) characters already read into | 4870 // Between one and three (value in scratch3) characters already read into |
4876 // scratch ready to write. | 4871 // scratch ready to write. |
4877 __ cmp(scratch5, Operand(2)); | 4872 __ cmp(scratch3, Operand(2)); |
4878 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | 4873 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); |
4879 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); | 4874 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); |
4880 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); | 4875 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); |
4881 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); | 4876 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); |
4882 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); | 4877 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); |
4883 // Copy any remaining bytes. | 4878 // Copy any remaining bytes. |
4884 __ b(&byte_loop); | 4879 __ b(&byte_loop); |
4885 | 4880 |
4886 // Simple loop. | 4881 // Simple loop. |
4887 // Copy words from src to dst, until less than four bytes left. | 4882 // Copy words from src to dst, until less than four bytes left. |
(...skipping 319 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5207 // Allocate new sliced string. At this point we do not reload the instance | 5202 // Allocate new sliced string. At this point we do not reload the instance |
5208 // type including the string encoding because we simply rely on the info | 5203 // type including the string encoding because we simply rely on the info |
5209 // provided by the original string. It does not matter if the original | 5204 // provided by the original string. It does not matter if the original |
5210 // string's encoding is wrong because we always have to recheck encoding of | 5205 // string's encoding is wrong because we always have to recheck encoding of |
5211 // the newly created string's parent anyways due to externalized strings. | 5206 // the newly created string's parent anyways due to externalized strings. |
5212 Label two_byte_slice, set_slice_header; | 5207 Label two_byte_slice, set_slice_header; |
5213 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 5208 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); |
5214 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 5209 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
5215 __ tst(r1, Operand(kStringEncodingMask)); | 5210 __ tst(r1, Operand(kStringEncodingMask)); |
5216 __ b(eq, &two_byte_slice); | 5211 __ b(eq, &two_byte_slice); |
5217 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); | 5212 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime); |
5218 __ jmp(&set_slice_header); | 5213 __ jmp(&set_slice_header); |
5219 __ bind(&two_byte_slice); | 5214 __ bind(&two_byte_slice); |
5220 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); | 5215 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime); |
5221 __ bind(&set_slice_header); | 5216 __ bind(&set_slice_header); |
5222 __ mov(r3, Operand(r3, LSL, 1)); | 5217 __ mov(r3, Operand(r3, LSL, 1)); |
5223 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 5218 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
5224 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 5219 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
5225 __ jmp(&return_r0); | 5220 __ jmp(&return_r0); |
5226 | 5221 |
5227 __ bind(©_routine); | 5222 __ bind(©_routine); |
5228 } | 5223 } |
5229 | 5224 |
5230 // r5: underlying subject string | 5225 // r5: underlying subject string |
(...skipping 20 matching lines...) Expand all Loading... | |
5251 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 5246 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
5252 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5247 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
5253 | 5248 |
5254 __ bind(&allocate_result); | 5249 __ bind(&allocate_result); |
5255 // Sequential ASCII string. Allocate the result. | 5250 // Sequential ASCII string. Allocate the result. |
5256 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 5251 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); |
5257 __ tst(r1, Operand(kStringEncodingMask)); | 5252 __ tst(r1, Operand(kStringEncodingMask)); |
5258 __ b(eq, &two_byte_sequential); | 5253 __ b(eq, &two_byte_sequential); |
5259 | 5254 |
5260 // Allocate and copy the resulting ASCII string. | 5255 // Allocate and copy the resulting ASCII string. |
5261 __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); | 5256 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime); |
5262 | 5257 |
5263 // Locate first character of substring to copy. | 5258 // Locate first character of substring to copy. |
5264 __ add(r5, r5, r3); | 5259 __ add(r5, r5, r3); |
5265 // Locate first character of result. | 5260 // Locate first character of result. |
5266 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5261 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
5267 | 5262 |
5268 // r0: result string | 5263 // r0: result string |
5269 // r1: first character of result string | 5264 // r1: first character of result string |
5270 // r2: result string length | 5265 // r2: result string length |
5271 // r5: first character of substring to copy | 5266 // r5: first character of substring to copy |
5272 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 5267 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
5273 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | 5268 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9, |
5274 COPY_ASCII | DEST_ALWAYS_ALIGNED); | 5269 COPY_ASCII | DEST_ALWAYS_ALIGNED); |
5275 __ jmp(&return_r0); | 5270 __ jmp(&return_r0); |
5276 | 5271 |
5277 // Allocate and copy the resulting two-byte string. | 5272 // Allocate and copy the resulting two-byte string. |
5278 __ bind(&two_byte_sequential); | 5273 __ bind(&two_byte_sequential); |
5279 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); | 5274 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime); |
5280 | 5275 |
5281 // Locate first character of substring to copy. | 5276 // Locate first character of substring to copy. |
5282 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 5277 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
5283 __ add(r5, r5, Operand(r3, LSL, 1)); | 5278 __ add(r5, r5, Operand(r3, LSL, 1)); |
5284 // Locate first character of result. | 5279 // Locate first character of result. |
5285 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 5280 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
5286 | 5281 |
5287 // r0: result string. | 5282 // r0: result string. |
5288 // r1: first character of result. | 5283 // r1: first character of result. |
5289 // r2: result length. | 5284 // r2: result length. |
5290 // r5: first character of substring to copy. | 5285 // r5: first character of substring to copy. |
5291 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 5286 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
5292 StringHelper::GenerateCopyCharactersLong( | 5287 StringHelper::GenerateCopyCharactersLong( |
5293 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); | 5288 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED); |
5294 | 5289 |
5295 __ bind(&return_r0); | 5290 __ bind(&return_r0); |
5296 Counters* counters = masm->isolate()->counters(); | 5291 Counters* counters = masm->isolate()->counters(); |
5297 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); | 5292 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); |
5298 __ Drop(3); | 5293 __ Drop(3); |
5299 __ Ret(); | 5294 __ Ret(); |
5300 | 5295 |
5301 // Just jump to runtime to create the sub string. | 5296 // Just jump to runtime to create the sub string. |
5302 __ bind(&runtime); | 5297 __ bind(&runtime); |
5303 __ TailCallRuntime(Runtime::kSubString, 3, 1); | 5298 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5549 __ cmp(r6, Operand(2)); | 5544 __ cmp(r6, Operand(2)); |
5550 __ b(ne, &longer_than_two); | 5545 __ b(ne, &longer_than_two); |
5551 | 5546 |
5552 // Check that both strings are non-external ASCII strings. | 5547 // Check that both strings are non-external ASCII strings. |
5553 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 5548 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
5554 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5549 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5555 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 5550 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
5556 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 5551 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
5557 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5552 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5558 } | 5553 } |
5559 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, | 5554 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3, |
5560 &call_runtime); | 5555 &call_runtime); |
5561 | 5556 |
5562 // Get the two characters forming the sub string. | 5557 // Get the two characters forming the sub string. |
5563 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); | 5558 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); |
5564 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); | 5559 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); |
5565 | 5560 |
5566 // Try to lookup two character string in string table. If it is not found | 5561 // Try to lookup two character string in string table. If it is not found |
5567 // just allocate a new one. | 5562 // just allocate a new one. |
5568 Label make_two_character_string; | 5563 Label make_two_character_string; |
5569 StringHelper::GenerateTwoCharacterStringTableProbe( | 5564 StringHelper::GenerateTwoCharacterStringTableProbe( |
5570 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); | 5565 masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string); |
5571 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5566 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5572 __ add(sp, sp, Operand(2 * kPointerSize)); | 5567 __ add(sp, sp, Operand(2 * kPointerSize)); |
5573 __ Ret(); | 5568 __ Ret(); |
5574 | 5569 |
5575 __ bind(&make_two_character_string); | 5570 __ bind(&make_two_character_string); |
5576 // Resulting string has length 2 and first chars of two strings | 5571 // Resulting string has length 2 and first chars of two strings |
5577 // are combined into single halfword in r2 register. | 5572 // are combined into single halfword in r2 register. |
5578 // So we can fill resulting string without two loops by a single | 5573 // So we can fill resulting string without two loops by a single |
5579 // halfword store instruction (which assumes that processor is | 5574 // halfword store instruction (which assumes that processor is |
5580 // in a little endian mode) | 5575 // in a little endian mode) |
(...skipping 24 matching lines...) Expand all Loading... | |
5605 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5600 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5606 } | 5601 } |
5607 Label non_ascii, allocated, ascii_data; | 5602 Label non_ascii, allocated, ascii_data; |
5608 STATIC_ASSERT(kTwoByteStringTag == 0); | 5603 STATIC_ASSERT(kTwoByteStringTag == 0); |
5609 __ tst(r4, Operand(kStringEncodingMask)); | 5604 __ tst(r4, Operand(kStringEncodingMask)); |
5610 __ tst(r5, Operand(kStringEncodingMask), ne); | 5605 __ tst(r5, Operand(kStringEncodingMask), ne); |
5611 __ b(eq, &non_ascii); | 5606 __ b(eq, &non_ascii); |
5612 | 5607 |
5613 // Allocate an ASCII cons string. | 5608 // Allocate an ASCII cons string. |
5614 __ bind(&ascii_data); | 5609 __ bind(&ascii_data); |
5615 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); | 5610 __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime); |
5616 __ bind(&allocated); | 5611 __ bind(&allocated); |
5617 // Fill the fields of the cons string. | 5612 // Fill the fields of the cons string. |
5618 Label skip_write_barrier, after_writing; | 5613 Label skip_write_barrier, after_writing; |
5619 ExternalReference high_promotion_mode = ExternalReference:: | 5614 ExternalReference high_promotion_mode = ExternalReference:: |
5620 new_space_high_promotion_mode_active_address(masm->isolate()); | 5615 new_space_high_promotion_mode_active_address(masm->isolate()); |
5621 __ mov(r4, Operand(high_promotion_mode)); | 5616 __ mov(r4, Operand(high_promotion_mode)); |
5622 __ ldr(r4, MemOperand(r4, 0)); | 5617 __ ldr(r4, MemOperand(r4, 0)); |
5623 __ cmp(r4, Operand::Zero()); | 5618 __ cmp(r4, Operand::Zero()); |
5624 __ b(eq, &skip_write_barrier); | 5619 __ b(eq, &skip_write_barrier); |
5625 | 5620 |
5626 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 5621 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); |
5627 __ RecordWriteField(r7, | 5622 __ RecordWriteField(r3, |
5628 ConsString::kFirstOffset, | 5623 ConsString::kFirstOffset, |
5629 r0, | 5624 r0, |
5630 r4, | 5625 r4, |
5631 kLRHasNotBeenSaved, | 5626 kLRHasNotBeenSaved, |
5632 kDontSaveFPRegs); | 5627 kDontSaveFPRegs); |
5633 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 5628 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); |
5634 __ RecordWriteField(r7, | 5629 __ RecordWriteField(r3, |
5635 ConsString::kSecondOffset, | 5630 ConsString::kSecondOffset, |
5636 r1, | 5631 r1, |
5637 r4, | 5632 r4, |
5638 kLRHasNotBeenSaved, | 5633 kLRHasNotBeenSaved, |
5639 kDontSaveFPRegs); | 5634 kDontSaveFPRegs); |
5640 __ jmp(&after_writing); | 5635 __ jmp(&after_writing); |
5641 | 5636 |
5642 __ bind(&skip_write_barrier); | 5637 __ bind(&skip_write_barrier); |
5643 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 5638 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); |
5644 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 5639 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); |
5645 | 5640 |
5646 __ bind(&after_writing); | 5641 __ bind(&after_writing); |
5647 | 5642 |
5648 __ mov(r0, Operand(r7)); | 5643 __ mov(r0, Operand(r3)); |
5649 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5644 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5650 __ add(sp, sp, Operand(2 * kPointerSize)); | 5645 __ add(sp, sp, Operand(2 * kPointerSize)); |
5651 __ Ret(); | 5646 __ Ret(); |
5652 | 5647 |
5653 __ bind(&non_ascii); | 5648 __ bind(&non_ascii); |
5654 // At least one of the strings is two-byte. Check whether it happens | 5649 // At least one of the strings is two-byte. Check whether it happens |
5655 // to contain only one byte characters. | 5650 // to contain only one byte characters. |
5656 // r4: first instance type. | 5651 // r4: first instance type. |
5657 // r5: second instance type. | 5652 // r5: second instance type. |
5658 __ tst(r4, Operand(kOneByteDataHintMask)); | 5653 __ tst(r4, Operand(kOneByteDataHintMask)); |
5659 __ tst(r5, Operand(kOneByteDataHintMask), ne); | 5654 __ tst(r5, Operand(kOneByteDataHintMask), ne); |
5660 __ b(ne, &ascii_data); | 5655 __ b(ne, &ascii_data); |
5661 __ eor(r4, r4, Operand(r5)); | 5656 __ eor(r4, r4, Operand(r5)); |
5662 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); | 5657 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); |
5663 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); | 5658 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
5664 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); | 5659 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
5665 __ b(eq, &ascii_data); | 5660 __ b(eq, &ascii_data); |
5666 | 5661 |
5667 // Allocate a two byte cons string. | 5662 // Allocate a two byte cons string. |
5668 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); | 5663 __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime); |
5669 __ jmp(&allocated); | 5664 __ jmp(&allocated); |
5670 | 5665 |
5671 // We cannot encounter sliced strings or cons strings here since: | 5666 // We cannot encounter sliced strings or cons strings here since: |
5672 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); | 5667 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); |
5673 // Handle creating a flat result from either external or sequential strings. | 5668 // Handle creating a flat result from either external or sequential strings. |
5674 // Locate the first characters' locations. | 5669 // Locate the first characters' locations. |
5675 // r0: first string | 5670 // r0: first string |
5676 // r1: second string | 5671 // r1: second string |
5677 // r2: length of first string | 5672 // r2: length of first string |
5678 // r3: length of second string | 5673 // r3: length of second string |
5679 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5674 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
5680 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5675 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
5681 // r6: sum of lengths. | 5676 // r6: sum of lengths. |
5682 Label first_prepared, second_prepared; | 5677 Label first_prepared, second_prepared; |
5683 __ bind(&string_add_flat_result); | 5678 __ bind(&string_add_flat_result); |
5684 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 5679 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
5685 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5680 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5686 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 5681 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
5687 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 5682 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
5688 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5683 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5689 } | 5684 } |
5690 | 5685 |
5691 // Check whether both strings have same encoding | 5686 // Check whether both strings have same encoding |
5692 __ eor(r7, r4, Operand(r5)); | 5687 __ eor(ip, r4, Operand(r5)); |
5693 __ tst(r7, Operand(kStringEncodingMask)); | 5688 __ tst(ip, Operand(kStringEncodingMask)); |
Rodolph Perfetta
2013/07/30 15:19:08
if you want to use ip you need to make sure kStringEncodingMask can be encoded as an immediate operand, otherwise the assembler may need ip as a scratch register to materialize the constant.
rmcilroy
2013/07/30 17:12:37
Done.
| |
5694 __ b(ne, &call_runtime); | 5689 __ b(ne, &call_runtime); |
5695 | 5690 |
5696 STATIC_ASSERT(kSeqStringTag == 0); | 5691 STATIC_ASSERT(kSeqStringTag == 0); |
5697 __ tst(r4, Operand(kStringRepresentationMask)); | 5692 __ tst(r4, Operand(kStringRepresentationMask)); |
5698 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 5693 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
5699 __ add(r7, | 5694 __ add(r6, |
5700 r0, | 5695 r0, |
5701 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), | 5696 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
5702 LeaveCC, | 5697 LeaveCC, |
5703 eq); | 5698 eq); |
5704 __ b(eq, &first_prepared); | 5699 __ b(eq, &first_prepared); |
5705 // External string: rule out short external string and load string resource. | 5700 // External string: rule out short external string and load string resource. |
5706 STATIC_ASSERT(kShortExternalStringTag != 0); | 5701 STATIC_ASSERT(kShortExternalStringTag != 0); |
5707 __ tst(r4, Operand(kShortExternalStringMask)); | 5702 __ tst(r4, Operand(kShortExternalStringMask)); |
5708 __ b(ne, &call_runtime); | 5703 __ b(ne, &call_runtime); |
5709 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); | 5704 __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); |
5710 __ bind(&first_prepared); | 5705 __ bind(&first_prepared); |
5711 | 5706 |
5712 STATIC_ASSERT(kSeqStringTag == 0); | 5707 STATIC_ASSERT(kSeqStringTag == 0); |
5713 __ tst(r5, Operand(kStringRepresentationMask)); | 5708 __ tst(r5, Operand(kStringRepresentationMask)); |
5714 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 5709 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
5715 __ add(r1, | 5710 __ add(r1, |
5716 r1, | 5711 r1, |
5717 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), | 5712 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
5718 LeaveCC, | 5713 LeaveCC, |
5719 eq); | 5714 eq); |
5720 __ b(eq, &second_prepared); | 5715 __ b(eq, &second_prepared); |
5721 // External string: rule out short external string and load string resource. | 5716 // External string: rule out short external string and load string resource. |
5722 STATIC_ASSERT(kShortExternalStringTag != 0); | 5717 STATIC_ASSERT(kShortExternalStringTag != 0); |
5723 __ tst(r5, Operand(kShortExternalStringMask)); | 5718 __ tst(r5, Operand(kShortExternalStringMask)); |
5724 __ b(ne, &call_runtime); | 5719 __ b(ne, &call_runtime); |
5725 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); | 5720 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); |
5726 __ bind(&second_prepared); | 5721 __ bind(&second_prepared); |
5727 | 5722 |
5728 Label non_ascii_string_add_flat_result; | 5723 Label non_ascii_string_add_flat_result; |
5729 // r7: first character of first string | 5724 // r6: first character of first string |
5730 // r1: first character of second string | 5725 // r1: first character of second string |
5731 // r2: length of first string. | 5726 // r2: length of first string. |
5732 // r3: length of second string. | 5727 // r3: length of second string. |
5733 // r6: sum of lengths. | |
5734 // Both strings have the same encoding. | 5728 // Both strings have the same encoding. |
5735 STATIC_ASSERT(kTwoByteStringTag == 0); | 5729 STATIC_ASSERT(kTwoByteStringTag == 0); |
5736 __ tst(r5, Operand(kStringEncodingMask)); | 5730 __ tst(r5, Operand(kStringEncodingMask)); |
5737 __ b(eq, &non_ascii_string_add_flat_result); | 5731 __ b(eq, &non_ascii_string_add_flat_result); |
5738 | 5732 |
5739 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); | 5733 __ add(r2, r2, Operand(r3)); |
5740 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5734 __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime); |
5735 __ sub(r2, r2, Operand(r3)); | |
5736 __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | |
5741 // r0: result string. | 5737 // r0: result string. |
5742 // r7: first character of first string. | 5738 // r6: first character of first string. |
5743 // r1: first character of second string. | 5739 // r1: first character of second string. |
5744 // r2: length of first string. | 5740 // r2: length of first string. |
5745 // r3: length of second string. | 5741 // r3: length of second string. |
5746 // r6: first character of result. | 5742 // r5: first character of result. |
5747 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); | 5743 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true); |
5748 // r6: next character of result. | 5744 // r5: next character of result. |
5749 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); | 5745 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true); |
5750 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5746 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5751 __ add(sp, sp, Operand(2 * kPointerSize)); | 5747 __ add(sp, sp, Operand(2 * kPointerSize)); |
5752 __ Ret(); | 5748 __ Ret(); |
5753 | 5749 |
5754 __ bind(&non_ascii_string_add_flat_result); | 5750 __ bind(&non_ascii_string_add_flat_result); |
5755 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); | 5751 __ add(r2, r2, Operand(r3)); |
5756 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 5752 __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime); |
5753 __ sub(r2, r2, Operand(r3)); | |
5754 __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | |
5757 // r0: result string. | 5755 // r0: result string. |
5758 // r7: first character of first string. | 5756 // r6: first character of first string. |
5759 // r1: first character of second string. | 5757 // r1: first character of second string. |
5760 // r2: length of first string. | 5758 // r2: length of first string. |
5761 // r3: length of second string. | 5759 // r3: length of second string. |
5762 // r6: first character of result. | 5760 // r5: first character of result. |
5763 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); | 5761 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false); |
5764 // r6: next character of result. | 5762 // r5: next character of result. |
5765 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); | 5763 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false); |
5766 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5764 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5767 __ add(sp, sp, Operand(2 * kPointerSize)); | 5765 __ add(sp, sp, Operand(2 * kPointerSize)); |
5768 __ Ret(); | 5766 __ Ret(); |
5769 | 5767 |
5770 // Just jump to runtime to add the two strings. | 5768 // Just jump to runtime to add the two strings. |
5771 __ bind(&call_runtime); | 5769 __ bind(&call_runtime); |
5772 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { | 5770 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { |
5773 GenerateRegisterArgsPop(masm); | 5771 GenerateRegisterArgsPop(masm); |
5774 // Build a frame | 5772 // Build a frame |
5775 { | 5773 { |
(...skipping 680 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6456 struct AheadOfTimeWriteBarrierStubList { | 6454 struct AheadOfTimeWriteBarrierStubList { |
6457 Register object, value, address; | 6455 Register object, value, address; |
6458 RememberedSetAction action; | 6456 RememberedSetAction action; |
6459 }; | 6457 }; |
6460 | 6458 |
6461 | 6459 |
6462 #define REG(Name) { kRegister_ ## Name ## _Code } | 6460 #define REG(Name) { kRegister_ ## Name ## _Code } |
6463 | 6461 |
6464 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { | 6462 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { |
6465 // Used in RegExpExecStub. | 6463 // Used in RegExpExecStub. |
6466 { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, | 6464 { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET }, |
6467 // Used in CompileArrayPushCall. | 6465 // Used in CompileArrayPushCall. |
6468 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. | 6466 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. |
6469 // Also used in KeyedStoreIC::GenerateGeneric. | 6467 // Also used in KeyedStoreIC::GenerateGeneric. |
6470 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, | 6468 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, |
6471 // Used in CompileStoreGlobal. | 6469 // Used in CompileStoreGlobal. |
6472 { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET }, | 6470 { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET }, |
6473 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. | 6471 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. |
6474 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, | 6472 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, |
6475 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, | 6473 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, |
6476 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. | 6474 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. |
6477 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, | 6475 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, |
6478 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, | 6476 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, |
6479 // KeyedStoreStubCompiler::GenerateStoreFastElement. | 6477 // KeyedStoreStubCompiler::GenerateStoreFastElement. |
6480 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, | 6478 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, |
6481 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, | 6479 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, |
6482 // ElementsTransitionGenerator::GenerateMapChangeElementTransition | 6480 // ElementsTransitionGenerator::GenerateMapChangeElementTransition |
6483 // and ElementsTransitionGenerator::GenerateSmiToDouble | 6481 // and ElementsTransitionGenerator::GenerateSmiToDouble |
6484 // and ElementsTransitionGenerator::GenerateDoubleToObject | 6482 // and ElementsTransitionGenerator::GenerateDoubleToObject |
6485 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, | 6483 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, |
6486 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, | 6484 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, |
6487 // ElementsTransitionGenerator::GenerateDoubleToObject | 6485 // ElementsTransitionGenerator::GenerateDoubleToObject |
6488 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, | 6486 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, |
6489 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, | 6487 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, |
6490 // StoreArrayLiteralElementStub::Generate | 6488 // StoreArrayLiteralElementStub::Generate |
6491 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, | 6489 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, |
6492 // FastNewClosureStub::Generate | 6490 // FastNewClosureStub::Generate |
6493 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, | 6491 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, |
6494 // StringAddStub::Generate | 6492 // StringAddStub::Generate |
6495 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, | 6493 { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, |
6496 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, | 6494 { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, |
6497 // Null termination. | 6495 // Null termination. |
6498 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} | 6496 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} |
6499 }; | 6497 }; |
6500 | 6498 |
6501 #undef REG | 6499 #undef REG |
6502 | 6500 |
6503 | 6501 |
6504 bool RecordWriteStub::IsPregenerated() { | 6502 bool RecordWriteStub::IsPregenerated() { |
6505 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; | 6503 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
6506 !entry->object.is(no_reg); | 6504 !entry->object.is(no_reg); |
(...skipping 668 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7175 __ bind(&fast_elements_case); | 7173 __ bind(&fast_elements_case); |
7176 GenerateCase(masm, FAST_ELEMENTS); | 7174 GenerateCase(masm, FAST_ELEMENTS); |
7177 } | 7175 } |
7178 | 7176 |
7179 | 7177 |
7180 #undef __ | 7178 #undef __ |
7181 | 7179 |
7182 } } // namespace v8::internal | 7180 } } // namespace v8::internal |
7183 | 7181 |
7184 #endif // V8_TARGET_ARCH_ARM | 7182 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |