Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 21063002: Out-of-line constant pool on Arm: Stage 1 - Free up r7 for use as constant pool pointer register (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Address Rodolph's comments. Created 7 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 315 matching lines...)
326 Counters* counters = masm->isolate()->counters(); 326 Counters* counters = masm->isolate()->counters();
327 327
328 Label gc; 328 Label gc;
329 329
330 // Pop the function info from the stack. 330 // Pop the function info from the stack.
331 __ pop(r3); 331 __ pop(r3);
332 332
333 // Attempt to allocate new JSFunction in new space. 333 // Attempt to allocate new JSFunction in new space.
334 __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); 334 __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
335 335
336 __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7); 336 __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r4);
337 337
338 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); 338 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
339 339
340 // Compute the function map in the current native context and set that 340 // Compute the function map in the current native context and set that
341 // as the map of the allocated object. 341 // as the map of the allocated object.
342 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 342 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
343 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); 343 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
344 __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index))); 344 __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
345 __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset)); 345 __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
346 346
(...skipping 24 matching lines...)
371 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); 371 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
372 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); 372 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
373 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); 373 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
374 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); 374 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
375 375
376 // Return result. The argument function info has been popped already. 376 // Return result. The argument function info has been popped already.
377 __ Ret(); 377 __ Ret();
378 378
379 __ bind(&check_optimized); 379 __ bind(&check_optimized);
380 380
381 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7); 381 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r4);
382 382
383 // r2 holds native context, r1 points to fixed array of 3-element entries 383 // r2 holds native context, r1 points to fixed array of 3-element entries
384 // (native context, optimized code, literals). 384 // (native context, optimized code, literals).
385 // The optimized code map must never be empty, so check the first elements. 385 // The optimized code map must never be empty, so check the first elements.
386 Label install_optimized; 386 Label install_optimized;
387 // Speculatively move code object into r4. 387 // Speculatively move code object into r4.
388 __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); 388 __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
389 __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); 389 __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
390 __ cmp(r2, r5); 390 __ cmp(r2, r5);
391 __ b(eq, &install_optimized); 391 __ b(eq, &install_optimized);
(...skipping 12 matching lines...)
404 __ cmp(r2, r5); 404 __ cmp(r2, r5);
405 __ b(ne, &loop); 405 __ b(ne, &loop);
406 // Hit: fetch the optimized code. 406 // Hit: fetch the optimized code.
407 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 407 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
408 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); 408 __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
409 __ add(r5, r5, Operand(kPointerSize)); 409 __ add(r5, r5, Operand(kPointerSize));
410 __ ldr(r4, MemOperand(r5)); 410 __ ldr(r4, MemOperand(r5));
411 411
412 __ bind(&install_optimized); 412 __ bind(&install_optimized);
413 __ IncrementCounter(counters->fast_new_closure_install_optimized(), 413 __ IncrementCounter(counters->fast_new_closure_install_optimized(),
414 1, r6, r7); 414 1, r6, r5);
415 415
416 // TODO(fschneider): Idea: store proper code pointers in the map and either 416 // TODO(fschneider): Idea: store proper code pointers in the map and either
417 // unmangle them on marking or do nothing as the whole map is discarded on 417 // unmangle them on marking or do nothing as the whole map is discarded on
418 // major GC anyway. 418 // major GC anyway.
419 __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); 419 __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
420 __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); 420 __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
421 421
422 // Now link a function into a list of optimized functions. 422 // Now link a function into a list of optimized functions.
423 __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); 423 __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
424 424
(...skipping 417 matching lines...)
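A minimal C++ sketch of the optimized-code-map probe in the hunk above (hypothetical types and names; the real map is a FixedArray hanging off the SharedFunctionInfo): entries are (native context, optimized code, literals) triples, and the stub scans the context slots for a match before fetching the code one slot past the matched key.

  typedef void* Object;  // stand-in for a tagged V8 pointer

  struct CodeMapEntry {  // hypothetical C view of one 3-element entry
    Object native_context;
    Object optimized_code;
    Object literals;
  };

  // Returns the optimized code cached for |context|, or 0 on a miss,
  // mirroring the cmp/b(ne, &loop) scan and the "Hit: fetch" sequence.
  Object ProbeOptimizedCodeMap(const CodeMapEntry* entries, int count,
                               Object context) {
    for (int i = 0; i < count; i++) {
      if (entries[i].native_context == context) return entries[i].optimized_code;
    }
    return 0;  // caller falls back to the unoptimized code path
  }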
842 } else { 842 } else {
843 // Smi compared non-strictly with a non-Smi non-heap-number. Call 843 // Smi compared non-strictly with a non-Smi non-heap-number. Call
844 // the runtime. 844 // the runtime.
845 __ b(ne, slow); 845 __ b(ne, slow);
846 } 846 }
847 847
848 // Lhs is a smi, rhs is a number. 848 // Lhs is a smi, rhs is a number.
849 // Convert lhs to a double in d7. 849 // Convert lhs to a double in d7.
850 __ SmiToDouble(d7, lhs); 850 __ SmiToDouble(d7, lhs);
851 // Load the double from rhs, tagged HeapNumber r0, to d6. 851 // Load the double from rhs, tagged HeapNumber r0, to d6.
852 __ sub(r7, rhs, Operand(kHeapObjectTag)); 852 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
853 __ vldr(d6, r7, HeapNumber::kValueOffset);
854 853
855 // We now have both loaded as doubles but we can skip the lhs nan check 854 // We now have both loaded as doubles but we can skip the lhs nan check
856 // since it's a smi. 855 // since it's a smi.
857 __ jmp(lhs_not_nan); 856 __ jmp(lhs_not_nan);
858 857
859 __ bind(&rhs_is_smi); 858 __ bind(&rhs_is_smi);
860 // Rhs is a smi. Check whether the non-smi lhs is a heap number. 859 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
861 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); 860 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
862 if (strict) { 861 if (strict) {
863 // If lhs is not a number and rhs is a smi then strict equality cannot 862 // If lhs is not a number and rhs is a smi then strict equality cannot
864 // succeed. Return non-equal. 863 // succeed. Return non-equal.
865 // If lhs is r0 then there is already a non-zero value in it. 864 // If lhs is r0 then there is already a non-zero value in it.
866 if (!lhs.is(r0)) { 865 if (!lhs.is(r0)) {
867 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); 866 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
868 } 867 }
869 __ Ret(ne); 868 __ Ret(ne);
870 } else { 869 } else {
871 // Smi compared non-strictly with a non-smi non-heap-number. Call 870 // Smi compared non-strictly with a non-smi non-heap-number. Call
872 // the runtime. 871 // the runtime.
873 __ b(ne, slow); 872 __ b(ne, slow);
874 } 873 }
875 874
876 // Rhs is a smi, lhs is a heap number. 875 // Rhs is a smi, lhs is a heap number.
877 // Load the double from lhs, tagged HeapNumber r1, to d7. 876 // Load the double from lhs, tagged HeapNumber r1, to d7.
878 __ sub(r7, lhs, Operand(kHeapObjectTag)); 877 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
879 __ vldr(d7, r7, HeapNumber::kValueOffset);
880 // Convert rhs to a double in d6. 878 // Convert rhs to a double in d6.
881 __ SmiToDouble(d6, rhs); 879 __ SmiToDouble(d6, rhs);
882 // Fall through to both_loaded_as_doubles. 880 // Fall through to both_loaded_as_doubles.
883 } 881 }
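The repeated change in this function folds the kHeapObjectTag subtraction into the vldr displacement, removing the need for r7 as a scratch register. A small C++ sketch of why the two forms read the same word (kHeapObjectTag is 1 in V8; the field offset below is a placeholder for HeapNumber::kValueOffset):

  const int kHeapObjectTag = 1;  // low bit set on tagged heap pointers
  const int kValueOffset = 4;    // placeholder for HeapNumber::kValueOffset

  double LoadViaScratch(char* tagged) {
    char* untagged = tagged - kHeapObjectTag;  // old code: __ sub(r7, rhs, ...)
    return *reinterpret_cast<double*>(untagged + kValueOffset);
  }

  double LoadWithFoldedTag(char* tagged) {
    // new code: __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag)
    return *reinterpret_cast<double*>(tagged + (kValueOffset - kHeapObjectTag));
  }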
884 882
885 883
886 // See comment at call site. 884 // See comment at call site.
887 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, 885 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
888 Register lhs, 886 Register lhs,
889 Register rhs) { 887 Register rhs) {
(...skipping 47 matching lines...)
937 (lhs.is(r1) && rhs.is(r0))); 935 (lhs.is(r1) && rhs.is(r0)));
938 936
939 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); 937 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
940 __ b(ne, not_heap_numbers); 938 __ b(ne, not_heap_numbers);
941 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); 939 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
942 __ cmp(r2, r3); 940 __ cmp(r2, r3);
943 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. 941 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
944 942
945 // Both are heap numbers. Load them up then jump to the code we have 943 // Both are heap numbers. Load them up then jump to the code we have
946 // for that. 944 // for that.
947 __ sub(r7, rhs, Operand(kHeapObjectTag)); 945 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
948 __ vldr(d6, r7, HeapNumber::kValueOffset); 946 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
949 __ sub(r7, lhs, Operand(kHeapObjectTag));
950 __ vldr(d7, r7, HeapNumber::kValueOffset);
951 __ jmp(both_loaded_as_doubles); 947 __ jmp(both_loaded_as_doubles);
952 } 948 }
953 949
954 950
955 // Fast negative check for internalized-to-internalized equality. 951 // Fast negative check for internalized-to-internalized equality.
956 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, 952 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
957 Register lhs, 953 Register lhs,
958 Register rhs, 954 Register rhs,
959 Label* possible_strings, 955 Label* possible_strings,
960 Label* not_both_strings) { 956 Label* not_both_strings) {
(...skipping 409 matching lines...)
1370 } 1366 }
1371 1367
1372 1368
1373 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( 1369 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
1374 MacroAssembler* masm) { 1370 MacroAssembler* masm) {
1375 UNIMPLEMENTED(); 1371 UNIMPLEMENTED();
1376 } 1372 }
1377 1373
1378 1374
1379 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, 1375 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
1380 Token::Value op) { 1376 Token::Value op,
1377 Register scratch1,
1378 Register scratch2) {
1381 Register left = r1; 1379 Register left = r1;
1382 Register right = r0; 1380 Register right = r0;
1383 Register scratch1 = r7;
1384 Register scratch2 = r9;
1385 1381
1386 ASSERT(right.is(r0)); 1382 ASSERT(right.is(r0));
1383 ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
1387 STATIC_ASSERT(kSmiTag == 0); 1384 STATIC_ASSERT(kSmiTag == 0);
1388 1385
1389 Label not_smi_result; 1386 Label not_smi_result;
1390 switch (op) { 1387 switch (op) {
1391 case Token::ADD: 1388 case Token::ADD:
1392 __ add(right, left, Operand(right), SetCC); // Add optimistically. 1389 __ add(right, left, Operand(right), SetCC); // Add optimistically.
1393 __ Ret(vc); 1390 __ Ret(vc);
1394 __ sub(right, right, Operand(left)); // Revert optimistic add. 1391 __ sub(right, right, Operand(left)); // Revert optimistic add.
1395 break; 1392 break;
1396 case Token::SUB: 1393 case Token::SUB:
(...skipping 194 matching lines...)
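The ADD case above is an "optimistic add": add with SetCC, return while the overflow flag is clear (Ret(vc)), and otherwise revert the add and fall through to the slow path. A portable C++ sketch of the same pattern (my illustration, using the GCC/Clang overflow builtin rather than condition flags):

  #include <stdint.h>

  // Returns true and stores the sum when the add does not overflow; on
  // overflow it leaves *result alone, like the stub's revert-and-fall-through.
  bool OptimisticAdd(int32_t left, int32_t right, int32_t* result) {
    int32_t sum;
    if (__builtin_add_overflow(left, right, &sum)) {
      return false;  // overflow set: the stub undoes the add and goes slow
    }
    *result = sum;   // overflow clear: the stub returns here (__ Ret(vc))
    return true;
  }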
1591 1588
1592 1589
1593 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, 1590 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
1594 BinaryOpIC::TypeInfo left_type, 1591 BinaryOpIC::TypeInfo left_type,
1595 BinaryOpIC::TypeInfo right_type, 1592 BinaryOpIC::TypeInfo right_type,
1596 bool smi_operands, 1593 bool smi_operands,
1597 Label* not_numbers, 1594 Label* not_numbers,
1598 Label* gc_required, 1595 Label* gc_required,
1599 Label* miss, 1596 Label* miss,
1600 Token::Value op, 1597 Token::Value op,
1601 OverwriteMode mode) { 1598 OverwriteMode mode,
1599 Register scratch1,
1600 Register scratch2,
1601 Register scratch3,
1602 Register scratch4) {
1602 Register left = r1; 1603 Register left = r1;
1603 Register right = r0; 1604 Register right = r0;
1604 Register scratch1 = r6; 1605 Register result = scratch3;
1605 Register scratch2 = r7; 1606 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1606 Register scratch3 = r4;
1607 1607
1608 ASSERT(smi_operands || (not_numbers != NULL)); 1608 ASSERT(smi_operands || (not_numbers != NULL));
1609 if (smi_operands) { 1609 if (smi_operands) {
1610 __ AssertSmi(left); 1610 __ AssertSmi(left);
1611 __ AssertSmi(right); 1611 __ AssertSmi(right);
1612 } 1612 }
1613 if (left_type == BinaryOpIC::SMI) { 1613 if (left_type == BinaryOpIC::SMI) {
1614 __ JumpIfNotSmi(left, miss); 1614 __ JumpIfNotSmi(left, miss);
1615 } 1615 }
1616 if (right_type == BinaryOpIC::SMI) { 1616 if (right_type == BinaryOpIC::SMI) {
1617 __ JumpIfNotSmi(right, miss); 1617 __ JumpIfNotSmi(right, miss);
1618 } 1618 }
1619 1619
1620 Register heap_number_map = r9; 1620 Register heap_number_map = scratch4;
1621 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1621 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1622 1622
1623 switch (op) { 1623 switch (op) {
1624 case Token::ADD: 1624 case Token::ADD:
1625 case Token::SUB: 1625 case Token::SUB:
1626 case Token::MUL: 1626 case Token::MUL:
1627 case Token::DIV: 1627 case Token::DIV:
1628 case Token::MOD: { 1628 case Token::MOD: {
1629 // Allocate new heap number for result. 1629 // Allocate new heap number for result.
1630 Register result = r5;
1631 BinaryOpStub_GenerateHeapResultAllocation( 1630 BinaryOpStub_GenerateHeapResultAllocation(
1632 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); 1631 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
1633 1632
1634 // Load left and right operands into d0 and d1. 1633 // Load left and right operands into d0 and d1.
1635 if (smi_operands) { 1634 if (smi_operands) {
1636 __ SmiToDouble(d1, right); 1635 __ SmiToDouble(d1, right);
1637 __ SmiToDouble(d0, left); 1636 __ SmiToDouble(d0, left);
1638 } else { 1637 } else {
1639 // Load right operand into d1. 1638 // Load right operand into d1.
1640 if (right_type == BinaryOpIC::INT32) { 1639 if (right_type == BinaryOpIC::INT32) {
(...skipping 102 matching lines...)
1743 default: 1742 default:
1744 UNREACHABLE(); 1743 UNREACHABLE();
1745 } 1744 }
1746 1745
1747 // Check that the *signed* result fits in a smi. 1746 // Check that the *signed* result fits in a smi.
1748 __ TrySmiTag(r0, r2, &result_not_a_smi); 1747 __ TrySmiTag(r0, r2, &result_not_a_smi);
1749 __ Ret(); 1748 __ Ret();
1750 1749
1751 // Allocate new heap number for result. 1750 // Allocate new heap number for result.
1752 __ bind(&result_not_a_smi); 1751 __ bind(&result_not_a_smi);
1753 Register result = r5;
1754 if (smi_operands) { 1752 if (smi_operands) {
1755 __ AllocateHeapNumber( 1753 __ AllocateHeapNumber(
1756 result, scratch1, scratch2, heap_number_map, gc_required); 1754 result, scratch1, scratch2, heap_number_map, gc_required);
1757 } else { 1755 } else {
1758 BinaryOpStub_GenerateHeapResultAllocation( 1756 BinaryOpStub_GenerateHeapResultAllocation(
1759 masm, result, heap_number_map, scratch1, scratch2, gc_required, 1757 masm, result, heap_number_map, scratch1, scratch2, gc_required,
1760 mode); 1758 mode);
1761 } 1759 }
1762 1760
1763 // r2: Answer as signed int32. 1761 // r2: Answer as signed int32.
1764 // r5: Heap number to write answer into. 1762 // result: Heap number to write answer into.
1765 1763
1766 // Nothing can go wrong now, so move the heap number to r0, which is the 1764 // Nothing can go wrong now, so move the heap number to r0, which is the
1767 // result. 1765 // result.
1768 __ mov(r0, Operand(r5)); 1766 __ mov(r0, Operand(result));
1769 1767
1770 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As 1768 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
1771 // mentioned above SHR needs to always produce a positive result. 1769 // mentioned above SHR needs to always produce a positive result.
1772 __ vmov(s0, r2); 1770 __ vmov(s0, r2);
1773 if (op == Token::SHR) { 1771 if (op == Token::SHR) {
1774 __ vcvt_f64_u32(d0, s0); 1772 __ vcvt_f64_u32(d0, s0);
1775 } else { 1773 } else {
1776 __ vcvt_f64_s32(d0, s0); 1774 __ vcvt_f64_s32(d0, s0);
1777 } 1775 }
1778 __ sub(r3, r0, Operand(kHeapObjectTag)); 1776 __ sub(r3, r0, Operand(kHeapObjectTag));
(...skipping 10 matching lines...)
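TrySmiTag in the chunk above checks that the signed 32-bit result fits in a smi before tagging it. On 32-bit ARM a smi is the value shifted left by one (kSmiTag == 0, a single tag bit), so the value fits exactly when shifting the tagged word back recovers it. A sketch under those assumptions:

  #include <stdint.h>

  // Tags |value| as a smi if it fits in 31 signed bits.
  bool TrySmiTag(int32_t value, int32_t* tagged_out) {
    int32_t tagged = (int32_t)((uint32_t)value << 1);  // SmiTag: value * 2
    if ((tagged >> 1) != value) {
      return false;  // sign bit lost: the stub branches to &result_not_a_smi
    }
    *tagged_out = tagged;
    return true;
  }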
1789 // Generate the smi code. If the operation on smis is successful this return is 1787 // Generate the smi code. If the operation on smis is successful this return is
1790 // generated. If the result is not a smi and heap number allocation is not 1788 // generated. If the result is not a smi and heap number allocation is not
1791 // requested the code falls through. If number allocation is requested but a 1789 // requested the code falls through. If number allocation is requested but a
1792 // heap number cannot be allocated the code jumps to the label gc_required. 1790 // heap number cannot be allocated the code jumps to the label gc_required.
1793 void BinaryOpStub_GenerateSmiCode( 1791 void BinaryOpStub_GenerateSmiCode(
1794 MacroAssembler* masm, 1792 MacroAssembler* masm,
1795 Label* use_runtime, 1793 Label* use_runtime,
1796 Label* gc_required, 1794 Label* gc_required,
1797 Token::Value op, 1795 Token::Value op,
1798 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, 1796 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
1799 OverwriteMode mode) { 1797 OverwriteMode mode,
1798 Register scratch1,
1799 Register scratch2,
1800 Register scratch3,
1801 Register scratch4) {
1800 Label not_smis; 1802 Label not_smis;
1801 1803
1802 Register left = r1; 1804 Register left = r1;
1803 Register right = r0; 1805 Register right = r0;
1804 Register scratch1 = r7; 1806 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1805 1807
1806 // Perform combined smi check on both operands. 1808 // Perform combined smi check on both operands.
1807 __ orr(scratch1, left, Operand(right)); 1809 __ orr(scratch1, left, Operand(right));
1808 __ JumpIfNotSmi(scratch1, &not_smis); 1810 __ JumpIfNotSmi(scratch1, &not_smis);
1809 1811
1810 // If the smi-smi operation results in a smi, the result is returned. 1812 // If the smi-smi operation results in a smi, the result is returned.
1811 BinaryOpStub_GenerateSmiSmiOperation(masm, op); 1813 BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
1812 1814
1813 // If heap number results are possible generate the result in an allocated 1815 // If heap number results are possible generate the result in an allocated
1814 // heap number. 1816 // heap number.
1815 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { 1817 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
1816 BinaryOpStub_GenerateFPOperation( 1818 BinaryOpStub_GenerateFPOperation(
1817 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, 1819 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
1818 use_runtime, gc_required, &not_smis, op, mode); 1820 use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
1821 scratch1, scratch4);
1819 } 1822 }
1820 __ bind(&not_smis); 1823 __ bind(&not_smis);
1821 } 1824 }
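The combined smi check at the top of this function leans on kSmiTag == 0 (the STATIC_ASSERT above): ORing the two operands preserves a set tag bit from either one, so a single bit test covers both registers. In C++ terms:

  #include <stdint.h>

  const uint32_t kSmiTagMask = 1;  // low bit is 0 for smis, 1 for heap objects

  // One test for "are both values smis?", as in __ orr + JumpIfNotSmi.
  bool BothSmis(uint32_t left, uint32_t right) {
    return ((left | right) & kSmiTagMask) == 0;
  }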
1822 1825
1823 1826
1824 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 1827 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1825 Label right_arg_changed, call_runtime; 1828 Label right_arg_changed, call_runtime;
1826 1829
1827 if (op_ == Token::MOD && encoded_right_arg_.has_value) { 1830 if (op_ == Token::MOD && encoded_right_arg_.has_value) {
1828 // It is guaranteed that the value will fit into a Smi, because if it 1831 // It is guaranteed that the value will fit into a Smi, because if it
1829 // didn't, we wouldn't be here, see BinaryOp_Patch. 1832 // didn't, we wouldn't be here, see BinaryOp_Patch.
1830 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); 1833 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
1831 __ b(ne, &right_arg_changed); 1834 __ b(ne, &right_arg_changed);
1832 } 1835 }
1833 1836
1834 if (result_type_ == BinaryOpIC::UNINITIALIZED || 1837 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1835 result_type_ == BinaryOpIC::SMI) { 1838 result_type_ == BinaryOpIC::SMI) {
1836 // Only allow smi results. 1839 // Only allow smi results.
1837 BinaryOpStub_GenerateSmiCode( 1840 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
1838 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); 1841 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1839 } else { 1842 } else {
1840 // Allow heap number result and don't make a transition if a heap number 1843 // Allow heap number result and don't make a transition if a heap number
1841 // cannot be allocated. 1844 // cannot be allocated.
1842 BinaryOpStub_GenerateSmiCode( 1845 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
1843 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, 1846 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1844 mode_);
1845 } 1847 }
1846 1848
1847 // Code falls through if the result is not returned as either a smi or heap 1849 // Code falls through if the result is not returned as either a smi or heap
1848 // number. 1850 // number.
1849 __ bind(&right_arg_changed); 1851 __ bind(&right_arg_changed);
1850 GenerateTypeTransition(masm); 1852 GenerateTypeTransition(masm);
1851 1853
1852 __ bind(&call_runtime); 1854 __ bind(&call_runtime);
1853 { 1855 {
1854 FrameScope scope(masm, StackFrame::INTERNAL); 1856 FrameScope scope(masm, StackFrame::INTERNAL);
(...skipping 33 matching lines...)
1888 __ bind(&call_runtime); 1890 __ bind(&call_runtime);
1889 GenerateTypeTransition(masm); 1891 GenerateTypeTransition(masm);
1890 } 1892 }
1891 1893
1892 1894
1893 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 1895 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1894 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); 1896 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
1895 1897
1896 Register left = r1; 1898 Register left = r1;
1897 Register right = r0; 1899 Register right = r0;
1898 Register scratch1 = r7; 1900 Register scratch1 = r4;
1899 Register scratch2 = r9; 1901 Register scratch2 = r9;
1902 Register scratch3 = r5;
1900 LowDwVfpRegister double_scratch = d0; 1903 LowDwVfpRegister double_scratch = d0;
1901 1904
1902 Register heap_number_result = no_reg; 1905 Register heap_number_result = no_reg;
1903 Register heap_number_map = r6; 1906 Register heap_number_map = r6;
1904 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1907 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1905 1908
1906 Label call_runtime; 1909 Label call_runtime;
1907 // Labels for type transition, used for wrong input or output types. 1910 // Labels for type transition, used for wrong input or output types.
1908 // Both labels are currently bound to the same position. We use two 1911 // Both labels are currently bound to the same position. We use two
1909 // different labels to differentiate the cause leading to type transition. 1912 // different labels to differentiate the cause leading to type transition.
1910 Label transition; 1913 Label transition;
1911 1914
1912 // Smi-smi fast case. 1915 // Smi-smi fast case.
1913 Label skip; 1916 Label skip;
1914 __ orr(scratch1, left, right); 1917 __ orr(scratch1, left, right);
1915 __ JumpIfNotSmi(scratch1, &skip); 1918 __ JumpIfNotSmi(scratch1, &skip);
1916 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); 1919 BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
1917 // Fall through if the result is not a smi. 1920 // Fall through if the result is not a smi.
1918 __ bind(&skip); 1921 __ bind(&skip);
1919 1922
1920 switch (op_) { 1923 switch (op_) {
1921 case Token::ADD: 1924 case Token::ADD:
1922 case Token::SUB: 1925 case Token::SUB:
1923 case Token::MUL: 1926 case Token::MUL:
1924 case Token::DIV: 1927 case Token::DIV:
1925 case Token::MOD: { 1928 case Token::MOD: {
1926 // It could be that only SMIs have been seen at either the left 1929 // It could be that only SMIs have been seen at either the left
(...skipping 73 matching lines...)
2000 // A DIV operation expecting an integer result falls through 2003 // A DIV operation expecting an integer result falls through
2001 // to type transition. 2004 // to type transition.
2002 2005
2003 } else { 2006 } else {
2004 if (encoded_right_arg_.has_value) { 2007 if (encoded_right_arg_.has_value) {
2005 __ Vmov(d8, fixed_right_arg_value(), scratch1); 2008 __ Vmov(d8, fixed_right_arg_value(), scratch1);
2006 __ VFPCompareAndSetFlags(d1, d8); 2009 __ VFPCompareAndSetFlags(d1, d8);
2007 __ b(ne, &transition); 2010 __ b(ne, &transition);
2008 } 2011 }
2009 2012
2010 // We preserved r0 and r1 to be able to call runtime.
2011 // Save the left value on the stack.
2012 __ Push(r5, r4);
2013
2014 Label pop_and_call_runtime;
2015
2016 // Allocate a heap number to store the result. 2013 // Allocate a heap number to store the result.
2017 heap_number_result = r5; 2014 heap_number_result = r5;
2018 BinaryOpStub_GenerateHeapResultAllocation(masm, 2015 BinaryOpStub_GenerateHeapResultAllocation(masm,
2019 heap_number_result, 2016 heap_number_result,
2020 heap_number_map, 2017 heap_number_map,
2021 scratch1, 2018 scratch1,
2022 scratch2, 2019 scratch2,
2023 &pop_and_call_runtime, 2020 &call_runtime,
2024 mode_); 2021 mode_);
2025 2022
2026 // Load the left value from the value saved on the stack.
2027 __ Pop(r1, r0);
2028
2029 // Call the C function to handle the double operation. 2023 // Call the C function to handle the double operation.
2030 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); 2024 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
2031 if (FLAG_debug_code) { 2025 if (FLAG_debug_code) {
2032 __ stop("Unreachable code."); 2026 __ stop("Unreachable code.");
2033 } 2027 }
2034 2028
2035 __ bind(&pop_and_call_runtime);
2036 __ Drop(2);
2037 __ b(&call_runtime); 2029 __ b(&call_runtime);
2038 } 2030 }
2039 2031
2040 break; 2032 break;
2041 } 2033 }
2042 2034
2043 case Token::BIT_OR: 2035 case Token::BIT_OR:
2044 case Token::BIT_XOR: 2036 case Token::BIT_XOR:
2045 case Token::BIT_AND: 2037 case Token::BIT_AND:
2046 case Token::SAR: 2038 case Token::SAR:
(...skipping 130 matching lines...)
2177 __ bind(&done); 2169 __ bind(&done);
2178 2170
2179 GenerateNumberStub(masm); 2171 GenerateNumberStub(masm);
2180 } 2172 }
2181 2173
2182 2174
2183 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { 2175 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
2184 Label call_runtime, transition; 2176 Label call_runtime, transition;
2185 BinaryOpStub_GenerateFPOperation( 2177 BinaryOpStub_GenerateFPOperation(
2186 masm, left_type_, right_type_, false, 2178 masm, left_type_, right_type_, false,
2187 &transition, &call_runtime, &transition, op_, mode_); 2179 &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
2188 2180
2189 __ bind(&transition); 2181 __ bind(&transition);
2190 GenerateTypeTransition(masm); 2182 GenerateTypeTransition(masm);
2191 2183
2192 __ bind(&call_runtime); 2184 __ bind(&call_runtime);
2193 { 2185 {
2194 FrameScope scope(masm, StackFrame::INTERNAL); 2186 FrameScope scope(masm, StackFrame::INTERNAL);
2195 GenerateRegisterArgsPush(masm); 2187 GenerateRegisterArgsPush(masm);
2196 GenerateCallRuntime(masm); 2188 GenerateCallRuntime(masm);
2197 } 2189 }
2198 __ Ret(); 2190 __ Ret();
2199 } 2191 }
2200 2192
2201 2193
2202 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 2194 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2203 Label call_runtime, call_string_add_or_runtime, transition; 2195 Label call_runtime, call_string_add_or_runtime, transition;
2204 2196
2205 BinaryOpStub_GenerateSmiCode( 2197 BinaryOpStub_GenerateSmiCode(
2206 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); 2198 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
2199 r5, r6, r4, r9);
2207 2200
2208 BinaryOpStub_GenerateFPOperation( 2201 BinaryOpStub_GenerateFPOperation(
2209 masm, left_type_, right_type_, false, 2202 masm, left_type_, right_type_, false,
2210 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); 2203 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
2204 r4, r5, r9);
2211 2205
2212 __ bind(&transition); 2206 __ bind(&transition);
2213 GenerateTypeTransition(masm); 2207 GenerateTypeTransition(masm);
2214 2208
2215 __ bind(&call_string_add_or_runtime); 2209 __ bind(&call_string_add_or_runtime);
2216 if (op_ == Token::ADD) { 2210 if (op_ == Token::ADD) {
2217 GenerateAddStrings(masm); 2211 GenerateAddStrings(masm);
2218 } 2212 }
2219 2213
2220 __ bind(&call_runtime); 2214 __ bind(&call_runtime);
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
2302 // Untagged case: double input in d2, double result goes 2296 // Untagged case: double input in d2, double result goes
2303 // into d2. 2297 // into d2.
2304 // Tagged case: tagged input on top of stack and in r0, 2298 // Tagged case: tagged input on top of stack and in r0,
2305 // tagged result (heap number) goes into r0. 2299 // tagged result (heap number) goes into r0.
2306 2300
2307 Label input_not_smi; 2301 Label input_not_smi;
2308 Label loaded; 2302 Label loaded;
2309 Label calculate; 2303 Label calculate;
2310 Label invalid_cache; 2304 Label invalid_cache;
2311 const Register scratch0 = r9; 2305 const Register scratch0 = r9;
2312 const Register scratch1 = r7; 2306 const Register scratch1 = r4;
2313 const Register cache_entry = r0; 2307 const Register cache_entry = r0;
2314 const bool tagged = (argument_type_ == TAGGED); 2308 const bool tagged = (argument_type_ == TAGGED);
2315 2309
2316 if (tagged) { 2310 if (tagged) {
2317 // Argument is a number and is on the stack and in r0. 2311 // Argument is a number and is on the stack and in r0.
2318 // Load argument and check if it is a smi. 2312 // Load argument and check if it is a smi.
2319 __ JumpIfNotSmi(r0, &input_not_smi); 2313 __ JumpIfNotSmi(r0, &input_not_smi);
2320 2314
2321 // Input is a smi. Convert to double and load the low and high words 2315 // Input is a smi. Convert to double and load the low and high words
2322 // of the double into r2, r3. 2316 // of the double into r2, r3.
(...skipping 62 matching lines...)
2385 __ add(r1, r1, Operand(r1, LSL, 1)); 2379 __ add(r1, r1, Operand(r1, LSL, 1));
2386 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); 2380 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
2387 // Check if cache matches: Double value is stored in uint32_t[2] array. 2381 // Check if cache matches: Double value is stored in uint32_t[2] array.
2388 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); 2382 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
2389 __ cmp(r2, r4); 2383 __ cmp(r2, r4);
2390 __ cmp(r3, r5, eq); 2384 __ cmp(r3, r5, eq);
2391 __ b(ne, &calculate); 2385 __ b(ne, &calculate);
2392 // Cache hit. Load result, cleanup and return. 2386 // Cache hit. Load result, cleanup and return.
2393 Counters* counters = masm->isolate()->counters(); 2387 Counters* counters = masm->isolate()->counters();
2394 __ IncrementCounter( 2388 __ IncrementCounter(
2395 counters->transcendental_cache_hit(), 1, scratch0, scratch1); 2389 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
ulan 2013/08/06 12:29:09 We're using scratch1 and r4 close to one another.
rmcilroy 2013/08/06 13:23:26 Done.
2396 if (tagged) { 2390 if (tagged) {
2397 // Pop input value from stack and load result into r0. 2391 // Pop input value from stack and load result into r0.
2398 __ pop(); 2392 __ pop();
2399 __ mov(r0, Operand(r6)); 2393 __ mov(r0, Operand(r6));
2400 } else { 2394 } else {
2401 // Load result into d2. 2395 // Load result into d2.
2402 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); 2396 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
2403 } 2397 }
2404 __ Ret(); 2398 __ Ret();
2405 2399
(...skipping 128 matching lines...)
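The index arithmetic in the cache probe above produces a byte offset of index * 12: each TranscendentalCache entry is three 32-bit words (the two halves of the input double plus the tagged result, matching the three-register ldm). A sketch of the addressing, with a hypothetical entry layout:

  #include <stdint.h>

  struct CacheEntry {    // hypothetical C view of one cache element
    uint32_t input_low;  // loaded into r4
    uint32_t input_high; // loaded into r5
    uint32_t result;     // tagged HeapNumber pointer, loaded into r6
  };

  uint32_t EntryByteOffset(uint32_t index) {
    uint32_t times3 = index + (index << 1);  // __ add(r1, r1, Operand(r1, LSL, 1))
    return times3 << 2;                      // ...Operand(r1, LSL, 2): words to bytes
  }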
2534 const Register base = r1; 2528 const Register base = r1;
2535 const Register exponent = r2; 2529 const Register exponent = r2;
2536 const Register heapnumbermap = r5; 2530 const Register heapnumbermap = r5;
2537 const Register heapnumber = r0; 2531 const Register heapnumber = r0;
2538 const DwVfpRegister double_base = d1; 2532 const DwVfpRegister double_base = d1;
2539 const DwVfpRegister double_exponent = d2; 2533 const DwVfpRegister double_exponent = d2;
2540 const DwVfpRegister double_result = d3; 2534 const DwVfpRegister double_result = d3;
2541 const DwVfpRegister double_scratch = d0; 2535 const DwVfpRegister double_scratch = d0;
2542 const SwVfpRegister single_scratch = s0; 2536 const SwVfpRegister single_scratch = s0;
2543 const Register scratch = r9; 2537 const Register scratch = r9;
2544 const Register scratch2 = r7; 2538 const Register scratch2 = r4;
2545 2539
2546 Label call_runtime, done, int_exponent; 2540 Label call_runtime, done, int_exponent;
2547 if (exponent_type_ == ON_STACK) { 2541 if (exponent_type_ == ON_STACK) {
2548 Label base_is_smi, unpack_exponent; 2542 Label base_is_smi, unpack_exponent;
2549 // The exponent and base are supplied as arguments on the stack. 2543 // The exponent and base are supplied as arguments on the stack.
2550 // This can only happen if the stub is called from non-optimized code. 2544 // This can only happen if the stub is called from non-optimized code.
2551 // Load input parameters from stack to double registers. 2545 // Load input parameters from stack to double registers.
2552 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); 2546 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
2553 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); 2547 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
2554 2548
(...skipping 488 matching lines...)
3043 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; 3037 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
3044 __ ldr(r4, MemOperand(sp, offset_to_argv)); 3038 __ ldr(r4, MemOperand(sp, offset_to_argv));
3045 3039
3046 // Push a frame with special values setup to mark it as an entry frame. 3040 // Push a frame with special values setup to mark it as an entry frame.
3047 // r0: code entry 3041 // r0: code entry
3048 // r1: function 3042 // r1: function
3049 // r2: receiver 3043 // r2: receiver
3050 // r3: argc 3044 // r3: argc
3051 // r4: argv 3045 // r4: argv
3052 Isolate* isolate = masm->isolate(); 3046 Isolate* isolate = masm->isolate();
3053 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3054 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 3047 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3055 __ mov(r7, Operand(Smi::FromInt(marker))); 3048 __ mov(r8, Operand(Smi::FromInt(marker)));
3056 __ mov(r6, Operand(Smi::FromInt(marker))); 3049 __ mov(r6, Operand(Smi::FromInt(marker)));
3057 __ mov(r5, 3050 __ mov(r5,
3058 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); 3051 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
3059 __ ldr(r5, MemOperand(r5)); 3052 __ ldr(r5, MemOperand(r5));
3060 __ Push(r8, r7, r6, r5); 3053 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3054 __ Push(ip, r8, r6, r5);
3061 3055
3062 // Set up frame pointer for the frame to be pushed. 3056 // Set up frame pointer for the frame to be pushed.
3063 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 3057 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
3064 3058
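A sketch of the entry-frame slots the Push above creates (this patch moves the bad frame pointer into ip and one marker into r8 so that r7 is never touched); offsets are relative to sp after the push:

  sp + 12 : bad frame pointer, -1 (from ip) -- fails fast if ever dereferenced
  sp +  8 : frame-type marker smi (from r8)
  sp +  4 : frame-type marker smi (from r6)
  sp +  0 : saved Isolate::kCEntryFPAddress value (from r5)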
3065 // If this is the outermost JS call, set js_entry_sp value. 3059 // If this is the outermost JS call, set js_entry_sp value.
3066 Label non_outermost_js; 3060 Label non_outermost_js;
3067 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); 3061 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
3068 __ mov(r5, Operand(ExternalReference(js_entry_sp))); 3062 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
3069 __ ldr(r6, MemOperand(r5)); 3063 __ ldr(r6, MemOperand(r5));
3070 __ cmp(r6, Operand::Zero()); 3064 __ cmp(r6, Operand::Zero());
(...skipping 25 matching lines...)
3096 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3090 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3097 isolate))); 3091 isolate)));
3098 } 3092 }
3099 __ str(r0, MemOperand(ip)); 3093 __ str(r0, MemOperand(ip));
3100 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); 3094 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3101 __ b(&exit); 3095 __ b(&exit);
3102 3096
3103 // Invoke: Link this frame into the handler chain. There's only one 3097 // Invoke: Link this frame into the handler chain. There's only one
3104 // handler block in this code object, so its index is 0. 3098 // handler block in this code object, so its index is 0.
3105 __ bind(&invoke); 3099 __ bind(&invoke);
3106 // Must preserve r0-r4; r5-r7 are available. 3100 // Must preserve r0-r4; r5-r6 are available.
3107 __ PushTryHandler(StackHandler::JS_ENTRY, 0); 3101 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
3108 // If an exception not caught by another handler occurs, this handler 3102 // If an exception not caught by another handler occurs, this handler
3109 // returns control to the code after the bl(&invoke) above, which 3103 // returns control to the code after the bl(&invoke) above, which
3110 // restores all kCalleeSaved registers (including cp and fp) to their 3104 // restores all kCalleeSaved registers (including cp and fp) to their
3111 // saved values before returning a failure to C. 3105 // saved values before returning a failure to C.
3112 3106
3113 // Clear any pending exceptions. 3107 // Clear any pending exceptions.
3114 __ mov(r5, Operand(isolate->factory()->the_hole_value())); 3108 __ mov(r5, Operand(isolate->factory()->the_hole_value()));
3115 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3109 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3116 isolate))); 3110 isolate)));
(...skipping 587 matching lines...)
3704 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 3698 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3705 // The mapped parameters thus need to get indices 3699 // The mapped parameters thus need to get indices
3706 // MIN_CONTEXT_SLOTS+parameter_count-1 .. 3700 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3707 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count 3701 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3708 // We loop from right to left. 3702 // We loop from right to left.
3709 Label parameters_loop, parameters_test; 3703 Label parameters_loop, parameters_test;
3710 __ mov(r6, r1); 3704 __ mov(r6, r1);
3711 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); 3705 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
3712 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); 3706 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3713 __ sub(r9, r9, Operand(r1)); 3707 __ sub(r9, r9, Operand(r1));
3714 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); 3708 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
3715 __ add(r3, r4, Operand(r6, LSL, 1)); 3709 __ add(r3, r4, Operand(r6, LSL, 1));
3716 __ add(r3, r3, Operand(kParameterMapHeaderSize)); 3710 __ add(r3, r3, Operand(kParameterMapHeaderSize));
3717 3711
3718 // r6 = loop variable (tagged) 3712 // r6 = loop variable (tagged)
3719 // r1 = mapping index (tagged) 3713 // r1 = mapping index (tagged)
3720 // r3 = address of backing store (tagged) 3714 // r3 = address of backing store (tagged)
3721 // r4 = address of parameter map (tagged) 3715 // r4 = address of parameter map (tagged)
ulan 2013/08/06 12:29:09 r4 = address of parameter map (tagged) and r4 = ad
rmcilroy 2013/08/06 13:23:26 Done.
3722 // r5 = temporary scratch (a.o., for address calculation) 3716 // r0 = temporary scratch (a.o., for address calculation)
3723 // r7 = the hole value 3717 // r5 = the hole value
3724 __ jmp(&parameters_test); 3718 __ jmp(&parameters_test);
3725 3719
3726 __ bind(&parameters_loop); 3720 __ bind(&parameters_loop);
3727 __ sub(r6, r6, Operand(Smi::FromInt(1))); 3721 __ sub(r6, r6, Operand(Smi::FromInt(1)));
3728 __ mov(r5, Operand(r6, LSL, 1)); 3722 __ mov(r0, Operand(r6, LSL, 1));
3729 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); 3723 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
3730 __ str(r9, MemOperand(r4, r5)); 3724 __ str(r9, MemOperand(r4, r0));
3731 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); 3725 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
3732 __ str(r7, MemOperand(r3, r5)); 3726 __ str(r5, MemOperand(r3, r0));
3733 __ add(r9, r9, Operand(Smi::FromInt(1))); 3727 __ add(r9, r9, Operand(Smi::FromInt(1)));
3734 __ bind(&parameters_test); 3728 __ bind(&parameters_test);
3735 __ cmp(r6, Operand(Smi::FromInt(0))); 3729 __ cmp(r6, Operand(Smi::FromInt(0)));
3736 __ b(ne, &parameters_loop); 3730 __ b(ne, &parameters_loop);
3737 3731
3732 __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
ulan 2013/08/06 12:29:09 Please add a comment that we are restoring r0.
rmcilroy 2013/08/06 13:23:26 Done.
3733
3738 __ bind(&skip_parameter_map); 3734 __ bind(&skip_parameter_map);
3735 // r0 = address of new object (tagged)
3739 // r2 = argument count (tagged) 3736 // r2 = argument count (tagged)
3740 // r3 = address of backing store (tagged) 3737 // r3 = address of backing store (tagged)
3741 // r5 = scratch 3738 // r5 = scratch
3742 // Copy arguments header and remaining slots (if there are any). 3739 // Copy arguments header and remaining slots (if there are any).
3743 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); 3740 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
3744 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); 3741 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
3745 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); 3742 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
3746 3743
3747 Label arguments_loop, arguments_test; 3744 Label arguments_loop, arguments_test;
3748 __ mov(r9, r1); 3745 __ mov(r9, r1);
(...skipping 10 matching lines...)
3759 3756
3760 __ bind(&arguments_test); 3757 __ bind(&arguments_test);
3761 __ cmp(r9, Operand(r2)); 3758 __ cmp(r9, Operand(r2));
3762 __ b(lt, &arguments_loop); 3759 __ b(lt, &arguments_loop);
3763 3760
3764 // Return and remove the on-stack parameters. 3761 // Return and remove the on-stack parameters.
3765 __ add(sp, sp, Operand(3 * kPointerSize)); 3762 __ add(sp, sp, Operand(3 * kPointerSize));
3766 __ Ret(); 3763 __ Ret();
3767 3764
3768 // Do the runtime call to allocate the arguments object. 3765 // Do the runtime call to allocate the arguments object.
3766 // r0 = address of new object (tagged)
3769 // r2 = argument count (tagged) 3767 // r2 = argument count (tagged)
3770 __ bind(&runtime); 3768 __ bind(&runtime);
3771 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. 3769 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
3772 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); 3770 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3773 } 3771 }
3774 3772
3775 3773
3776 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { 3774 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3777 // sp[0] : number of parameters 3775 // sp[0] : number of parameters
3778 // sp[4] : receiver displacement 3776 // sp[4] : receiver displacement
(...skipping 108 matching lines...)
3887 const int kJSRegExpOffset = 3 * kPointerSize; 3885 const int kJSRegExpOffset = 3 * kPointerSize;
3888 3886
3889 Label runtime; 3887 Label runtime;
3890 // Allocation of registers for this function. These are in callee save 3888 // Allocation of registers for this function. These are in callee save
3891 // registers and will be preserved by the call to the native RegExp code, as 3889 // registers and will be preserved by the call to the native RegExp code, as
3892 // this code is called using the normal C calling convention. When calling 3890 // this code is called using the normal C calling convention. When calling
3893 // directly from generated code the native RegExp code will not do a GC and 3891 // directly from generated code the native RegExp code will not do a GC and
3894 // therefore the contents of these registers are safe to use after the call. 3892 // therefore the contents of these registers are safe to use after the call.
3895 Register subject = r4; 3893 Register subject = r4;
3896 Register regexp_data = r5; 3894 Register regexp_data = r5;
3897 Register last_match_info_elements = r6; 3895 Register last_match_info_elements = no_reg; // will be r6;
3898 3896
3899 // Ensure that a RegExp stack is allocated. 3897 // Ensure that a RegExp stack is allocated.
3900 Isolate* isolate = masm->isolate(); 3898 Isolate* isolate = masm->isolate();
3901 ExternalReference address_of_regexp_stack_memory_address = 3899 ExternalReference address_of_regexp_stack_memory_address =
3902 ExternalReference::address_of_regexp_stack_memory_address(isolate); 3900 ExternalReference::address_of_regexp_stack_memory_address(isolate);
3903 ExternalReference address_of_regexp_stack_memory_size = 3901 ExternalReference address_of_regexp_stack_memory_size =
3904 ExternalReference::address_of_regexp_stack_memory_size(isolate); 3902 ExternalReference::address_of_regexp_stack_memory_size(isolate);
3905 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); 3903 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
3906 __ ldr(r0, MemOperand(r0, 0)); 3904 __ ldr(r0, MemOperand(r0, 0));
3907 __ cmp(r0, Operand::Zero()); 3905 __ cmp(r0, Operand::Zero());
(...skipping 112 matching lines...)
4020 __ JumpIfNotSmi(r1, &runtime); 4018 __ JumpIfNotSmi(r1, &runtime);
4021 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); 4019 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
4022 __ cmp(r3, Operand(r1)); 4020 __ cmp(r3, Operand(r1));
4023 __ b(ls, &runtime); 4021 __ b(ls, &runtime);
4024 __ SmiUntag(r1); 4022 __ SmiUntag(r1);
4025 4023
4026 STATIC_ASSERT(4 == kOneByteStringTag); 4024 STATIC_ASSERT(4 == kOneByteStringTag);
4027 STATIC_ASSERT(kTwoByteStringTag == 0); 4025 STATIC_ASSERT(kTwoByteStringTag == 0);
4028 __ and_(r0, r0, Operand(kStringEncodingMask)); 4026 __ and_(r0, r0, Operand(kStringEncodingMask));
4029 __ mov(r3, Operand(r0, ASR, 2), SetCC); 4027 __ mov(r3, Operand(r0, ASR, 2), SetCC);
4030 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); 4028 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
4031 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); 4029 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
4032 4030
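The encoding dispatch above avoids a data-dependent branch in the stub: masking with kStringEncodingMask and shifting with SetCC leaves 1 in r3 for ASCII and 0 for two-byte, and the paired conditional loads (ne/eq) pick the matching code object. A C++ sketch of the selection (the mask value is an assumption based on the STATIC_ASSERTs):

  #include <stdint.h>

  const uint32_t kStringEncodingMask = 4;  // assumed: the one-byte tag bit

  // Selects the ASCII or two-byte irregexp code from the instance-type bits.
  void* SelectIrregexpCode(uint32_t type_bits, void* ascii_code,
                           void* uc16_code) {
    uint32_t one_byte = (type_bits & kStringEncodingMask) >> 2;  // ASR 2
    return one_byte ? ascii_code : uc16_code;  // ldr(..., ne) / ldr(..., eq)
  }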
4033 // (E) Carry on. String handling is done. 4031 // (E) Carry on. String handling is done.
4034 // r7: irregexp code 4032 // r6: irregexp code
4035 // Check that the irregexp code has been generated for the actual string 4033 // Check that the irregexp code has been generated for the actual string
4036 // encoding. If it has, the field contains a code object; otherwise it contains 4034 // encoding. If it has, the field contains a code object; otherwise it contains
4037 // a smi (code flushing support). 4035 // a smi (code flushing support).
4038 __ JumpIfSmi(r7, &runtime); 4036 __ JumpIfSmi(r6, &runtime);
4039 4037
4040 // r1: previous index 4038 // r1: previous index
4041 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); 4039 // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4042 // r7: code 4040 // r6: code
4043 // subject: Subject string 4041 // subject: Subject string
4044 // regexp_data: RegExp data (FixedArray) 4042 // regexp_data: RegExp data (FixedArray)
4045 // All checks done. Now push arguments for native regexp code. 4043 // All checks done. Now push arguments for native regexp code.
4046 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); 4044 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
4047 4045
4048 // Isolates: note we add an additional parameter here (isolate pointer). 4046 // Isolates: note we add an additional parameter here (isolate pointer).
4049 const int kRegExpExecuteArguments = 9; 4047 const int kRegExpExecuteArguments = 9;
4050 const int kParameterRegisters = 4; 4048 const int kParameterRegisters = 4;
4051 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); 4049 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4052 4050
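A note on the arithmetic above, assuming the standard ARM AAPCS that this call follows: the first kParameterRegisters (4) arguments travel in r0-r3, so EnterExitFrame only reserves stack slots for the remaining kRegExpExecuteArguments - kParameterRegisters = 5 arguments:

  arguments 1-4 -> r0, r1, r2, r3
  arguments 5-9 -> [sp + 0] .. [sp + 16]  (one word each, isolate pointer last)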
(...skipping 46 matching lines...)
4099 __ SmiUntag(r8); 4097 __ SmiUntag(r8);
4100 __ add(r3, r9, Operand(r8, LSL, r3)); 4098 __ add(r3, r9, Operand(r8, LSL, r3));
4101 4099
4102 // Argument 2 (r1): Previous index. 4100 // Argument 2 (r1): Previous index.
4103 // Already there 4101 // Already there
4104 4102
4105 // Argument 1 (r0): Subject string. 4103 // Argument 1 (r0): Subject string.
4106 __ mov(r0, subject); 4104 __ mov(r0, subject);
4107 4105
4108 // Locate the code entry and call it. 4106 // Locate the code entry and call it.
4109 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); 4107 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
4110 DirectCEntryStub stub; 4108 DirectCEntryStub stub;
4111 stub.GenerateCall(masm, r7); 4109 stub.GenerateCall(masm, r6);
4112 4110
4113 __ LeaveExitFrame(false, no_reg); 4111 __ LeaveExitFrame(false, no_reg);
4114 4112
4113 last_match_info_elements = r6;
4114
4115 // r0: result 4115 // r0: result
4116 // subject: subject string (callee saved) 4116 // subject: subject string (callee saved)
4117 // regexp_data: RegExp data (callee saved) 4117 // regexp_data: RegExp data (callee saved)
4118 // last_match_info_elements: Last match info elements (callee saved) 4118 // last_match_info_elements: Last match info elements (callee saved)
4119 // Check the result. 4119 // Check the result.
4120 Label success; 4120 Label success;
4121 __ cmp(r0, Operand(1)); 4121 __ cmp(r0, Operand(1));
4122 // We expect exactly one result since we force the called regexp to behave 4122 // We expect exactly one result since we force the called regexp to behave
4123 // as non-global. 4123 // as non-global.
4124 __ b(eq, &success); 4124 __ b(eq, &success);
(...skipping 68 matching lines...)
4193 __ str(r2, FieldMemOperand(last_match_info_elements, 4193 __ str(r2, FieldMemOperand(last_match_info_elements,
4194 RegExpImpl::kLastCaptureCountOffset)); 4194 RegExpImpl::kLastCaptureCountOffset));
4195 // Store last subject and last input. 4195 // Store last subject and last input.
4196 __ str(subject, 4196 __ str(subject,
4197 FieldMemOperand(last_match_info_elements, 4197 FieldMemOperand(last_match_info_elements,
4198 RegExpImpl::kLastSubjectOffset)); 4198 RegExpImpl::kLastSubjectOffset));
4199 __ mov(r2, subject); 4199 __ mov(r2, subject);
4200 __ RecordWriteField(last_match_info_elements, 4200 __ RecordWriteField(last_match_info_elements,
4201 RegExpImpl::kLastSubjectOffset, 4201 RegExpImpl::kLastSubjectOffset,
4202 subject, 4202 subject,
4203 r7, 4203 r3,
4204 kLRHasNotBeenSaved, 4204 kLRHasNotBeenSaved,
4205 kDontSaveFPRegs); 4205 kDontSaveFPRegs);
4206 __ mov(subject, r2); 4206 __ mov(subject, r2);
4207 __ str(subject, 4207 __ str(subject,
4208 FieldMemOperand(last_match_info_elements, 4208 FieldMemOperand(last_match_info_elements,
4209 RegExpImpl::kLastInputOffset)); 4209 RegExpImpl::kLastInputOffset));
4210 __ RecordWriteField(last_match_info_elements, 4210 __ RecordWriteField(last_match_info_elements,
4211 RegExpImpl::kLastInputOffset, 4211 RegExpImpl::kLastInputOffset,
4212 subject, 4212 subject,
4213 r7, 4213 r3,
4214 kLRHasNotBeenSaved, 4214 kLRHasNotBeenSaved,
4215 kDontSaveFPRegs); 4215 kDontSaveFPRegs);
4216 4216
4217 // Get the static offsets vector filled by the native regexp code. 4217 // Get the static offsets vector filled by the native regexp code.
4218 ExternalReference address_of_static_offsets_vector = 4218 ExternalReference address_of_static_offsets_vector =
4219 ExternalReference::address_of_static_offsets_vector(isolate); 4219 ExternalReference::address_of_static_offsets_vector(isolate);
4220 __ mov(r2, Operand(address_of_static_offsets_vector)); 4220 __ mov(r2, Operand(address_of_static_offsets_vector));
4221 4221
4222 // r1: number of capture registers 4222 // r1: number of capture registers
4223 // r2: offsets vector 4223 // r2: offsets vector
(...skipping 545 matching lines...)
4769 4769
4770 4770
4771 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, 4771 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
4772 Register dest, 4772 Register dest,
4773 Register src, 4773 Register src,
4774 Register count, 4774 Register count,
4775 Register scratch1, 4775 Register scratch1,
4776 Register scratch2, 4776 Register scratch2,
4777 Register scratch3, 4777 Register scratch3,
4778 Register scratch4, 4778 Register scratch4,
4779 Register scratch5,
4780 int flags) { 4779 int flags) {
4781 bool ascii = (flags & COPY_ASCII) != 0; 4780 bool ascii = (flags & COPY_ASCII) != 0;
4782 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; 4781 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
4783 4782
4784 if (dest_always_aligned && FLAG_debug_code) { 4783 if (dest_always_aligned && FLAG_debug_code) {
4785 // Check that destination is actually word aligned if the flag says 4784 // Check that destination is actually word aligned if the flag says
4786 // that it is. 4785 // that it is.
4787 __ tst(dest, Operand(kPointerAlignmentMask)); 4786 __ tst(dest, Operand(kPointerAlignmentMask));
4788 __ Check(eq, "Destination of copy not aligned."); 4787 __ Check(eq, "Destination of copy not aligned.");
4789 } 4788 }
(...skipping 54 matching lines...)
4844 __ and_(src, src, Operand(~3)); // Round down to load previous word. 4843 __ and_(src, src, Operand(~3)); // Round down to load previous word.
4845 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); 4844 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
4846 // Store the "shift" most significant bits of scratch in the least 4845 // Store the "shift" most significant bits of scratch in the least
4847 // significant bits (i.e., shift down by (32-shift)). 4846 // significant bits (i.e., shift down by (32-shift)).
4848 __ rsb(scratch2, left_shift, Operand(32)); 4847 __ rsb(scratch2, left_shift, Operand(32));
4849 Register right_shift = scratch2; 4848 Register right_shift = scratch2;
4850 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); 4849 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
4851 4850
4852 __ bind(&loop); 4851 __ bind(&loop);
4853 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); 4852 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
4854 __ sub(scratch5, limit, Operand(dest));
4855 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); 4853 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
4856 __ str(scratch1, MemOperand(dest, 4, PostIndex)); 4854 __ str(scratch1, MemOperand(dest, 4, PostIndex));
4857 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); 4855 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
4858 // Loop if four or more bytes left to copy. 4856 // Loop if four or more bytes left to copy.
4859 // Compare to eight, because we did the subtract before increasing dst. 4857 __ sub(scratch3, limit, Operand(dest));
4860 __ sub(scratch5, scratch5, Operand(8), SetCC); 4858 __ sub(scratch3, scratch3, Operand(4), SetCC);
4861 __ b(ge, &loop); 4859 __ b(ge, &loop);
4862 } 4860 }
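[Review note -- illustrative sketch, not part of the patch.] The loop above is the usual shift-and-merge copy for a source that is misaligned relative to the destination: each loaded word contributes its low bits to the current store and parks its high bits for the next one. A portable C++ model, assuming left_shift was set up (in the lines elided above) to 8, 16, or 24; all names here are illustrative:

  #include <cstddef>
  #include <cstdint>

  static void ShiftMergeCopy(uint32_t* dest, const uint32_t* src_words,
                             size_t word_count, unsigned left_shift) {
    unsigned right_shift = 32 - left_shift;        // rsb scratch2, left_shift, #32
    uint32_t carry = *src_words++ >> right_shift;  // preload the partial first word
    for (size_t i = 0; i < word_count; ++i) {
      uint32_t w = *src_words++;                   // ldr scratch3, [src], #4
      dest[i] = carry | (w << left_shift);         // orr + str
      carry = w >> right_shift;                    // leftover bits for the next store
    }
  }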
4863 // There is now between zero and three bytes left to copy (negative that 4861 // There is now between zero and three bytes left to copy (negative that
4864 // number is in scratch5), and between one and three bytes already read into 4862 // number is in scratch3), and between one and three bytes already read into
4865 // scratch1 (eight times that number in scratch4). We may have read past 4863 // scratch1 (eight times that number in scratch4). We may have read past
4866 // the end of the string, but because objects are aligned, we have not read 4864 // the end of the string, but because objects are aligned, we have not read
4867 // past the end of the object. 4865 // past the end of the object.
4868 // Find the minimum of remaining characters to move and preloaded characters 4866 // Find the minimum of remaining characters to move and preloaded characters
4869 // and write those as bytes. 4867 // and write those as bytes.
4870 __ add(scratch5, scratch5, Operand(4), SetCC); 4868 __ add(scratch3, scratch3, Operand(4), SetCC);
4871 __ b(eq, &done); 4869 __ b(eq, &done);
4872 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); 4870 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
4873 // Move minimum of bytes read and bytes left to copy to scratch4. 4871 // Move minimum of bytes read and bytes left to copy to scratch4.
4874 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); 4872 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
4875 // Between one and three (value in scratch5) characters already read into 4873 // Between one and three (value in scratch3) characters already read into
4876 // scratch ready to write. 4874 // scratch ready to write.
4877 __ cmp(scratch5, Operand(2)); 4875 __ cmp(scratch3, Operand(2));
4878 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); 4876 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
4879 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); 4877 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
4880 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); 4878 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
4881 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); 4879 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
4882 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); 4880 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
4883 // Copy any remaining bytes. 4881 // Copy any remaining bytes.
4884 __ b(&byte_loop); 4882 __ b(&byte_loop);
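[Review note -- illustrative sketch, not part of the patch.] Tail handling: at this point scratch3 holds the number of bytes still to copy (zero to three) and scratch4 holds eight times the number of bytes already preloaded into scratch1, so the cmp/mov pair computes a minimum and the predicated strb sequence writes one to three bytes of it. Roughly, in C++:

  #include <algorithm>
  #include <cstdint>

  static uint8_t* StoreTailBytes(uint8_t* dest, uint32_t preloaded,
                                 unsigned bytes_left, unsigned bits_preloaded) {
    // cmp scratch4, scratch3 LSL #3 / mov ..., lt: keep the smaller count.
    unsigned n = std::min(bytes_left, bits_preloaded / 8);  // n is 1..3 here
    for (unsigned i = 0; i < n; ++i) {
      *dest++ = static_cast<uint8_t>(preloaded);  // strb scratch1, [dest], #1
      preloaded >>= 8;                            // mov scratch1, ... LSR #8
    }
    return dest;
  }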
4885 4883
4886 // Simple loop. 4884 // Simple loop.
4887 // Copy words from src to dst, until less than four bytes left. 4885 // Copy words from src to dst, until less than four bytes left.
(...skipping 319 matching lines...)
5207 // Allocate new sliced string. At this point we do not reload the instance 5205 // Allocate new sliced string. At this point we do not reload the instance
5208 // type including the string encoding because we simply rely on the info 5206 // type including the string encoding because we simply rely on the info
5209 // provided by the original string. It does not matter if the original 5207 // provided by the original string. It does not matter if the original
5210 // string's encoding is wrong because we always have to recheck encoding of 5208 // string's encoding is wrong because we always have to recheck encoding of
5211 // the newly created string's parent anyway due to externalized strings. 5209 // the newly created string's parent anyway due to externalized strings.
5212 Label two_byte_slice, set_slice_header; 5210 Label two_byte_slice, set_slice_header;
5213 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); 5211 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
5214 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); 5212 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5215 __ tst(r1, Operand(kStringEncodingMask)); 5213 __ tst(r1, Operand(kStringEncodingMask));
5216 __ b(eq, &two_byte_slice); 5214 __ b(eq, &two_byte_slice);
5217 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); 5215 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
5218 __ jmp(&set_slice_header); 5216 __ jmp(&set_slice_header);
5219 __ bind(&two_byte_slice); 5217 __ bind(&two_byte_slice);
5220 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); 5218 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
5221 __ bind(&set_slice_header); 5219 __ bind(&set_slice_header);
5222 __ mov(r3, Operand(r3, LSL, 1)); 5220 __ mov(r3, Operand(r3, LSL, 1));
5223 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); 5221 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
5224 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); 5222 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
5225 __ jmp(&return_r0); 5223 __ jmp(&return_r0);
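[Review note -- illustrative sketch, not part of the patch.] The LSL #1 above appears to smi-tag the slice offset before it is stored into the header; with kSmiTag == 0 and kSmiTagSize == 1 (asserted further down in this file), tagging is a plain doubling:

  #include <cstdint>

  static int32_t ToSmi(int32_t value) { return value * 2; }  // mov r3, Operand(r3, LSL, 1)
  static int32_t FromSmi(int32_t smi) { return smi >> 1; }   // arithmetic shift untags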
5226 5224
5227 __ bind(&copy_routine); 5225 __ bind(&copy_routine);
5228 } 5226 }
5229 5227
5230 // r5: underlying subject string 5228 // r5: underlying subject string
(...skipping 20 matching lines...)
5251 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); 5249 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
5252 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5250 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5253 5251
5254 __ bind(&allocate_result); 5252 __ bind(&allocate_result);
5255 // Sequential ASCII string. Allocate the result. 5253 // Sequential ASCII string. Allocate the result.
5256 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); 5254 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
5257 __ tst(r1, Operand(kStringEncodingMask)); 5255 __ tst(r1, Operand(kStringEncodingMask));
5258 __ b(eq, &two_byte_sequential); 5256 __ b(eq, &two_byte_sequential);
5259 5257
5260 // Allocate and copy the resulting ASCII string. 5258 // Allocate and copy the resulting ASCII string.
5261 __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); 5259 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
5262 5260
5263 // Locate first character of substring to copy. 5261 // Locate first character of substring to copy.
5264 __ add(r5, r5, r3); 5262 __ add(r5, r5, r3);
5265 // Locate first character of result. 5263 // Locate first character of result.
5266 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5264 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5267 5265
5268 // r0: result string 5266 // r0: result string
5269 // r1: first character of result string 5267 // r1: first character of result string
5270 // r2: result string length 5268 // r2: result string length
5271 // r5: first character of substring to copy 5269 // r5: first character of substring to copy
5272 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5270 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5273 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, 5271 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
5274 COPY_ASCII | DEST_ALWAYS_ALIGNED); 5272 COPY_ASCII | DEST_ALWAYS_ALIGNED);
5275 __ jmp(&return_r0); 5273 __ jmp(&return_r0);
5276 5274
5277 // Allocate and copy the resulting two-byte string. 5275 // Allocate and copy the resulting two-byte string.
5278 __ bind(&two_byte_sequential); 5276 __ bind(&two_byte_sequential);
5279 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); 5277 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
5280 5278
5281 // Locate first character of substring to copy. 5279 // Locate first character of substring to copy.
5282 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 5280 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5283 __ add(r5, r5, Operand(r3, LSL, 1)); 5281 __ add(r5, r5, Operand(r3, LSL, 1));
5284 // Locate first character of result. 5282 // Locate first character of result.
5285 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5283 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5286 5284
5287 // r0: result string. 5285 // r0: result string.
5288 // r1: first character of result. 5286 // r1: first character of result.
5289 // r2: result length. 5287 // r2: result length.
5290 // r5: first character of substring to copy. 5288 // r5: first character of substring to copy.
5291 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 5289 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5292 StringHelper::GenerateCopyCharactersLong( 5290 StringHelper::GenerateCopyCharactersLong(
5293 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); 5291 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
5294 5292
5295 __ bind(&return_r0); 5293 __ bind(&return_r0);
5296 Counters* counters = masm->isolate()->counters(); 5294 Counters* counters = masm->isolate()->counters();
5297 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); 5295 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
5298 __ Drop(3); 5296 __ Drop(3);
5299 __ Ret(); 5297 __ Ret();
5300 5298
5301 // Just jump to runtime to create the sub string. 5299 // Just jump to runtime to create the sub string.
5302 __ bind(&runtime); 5300 __ bind(&runtime);
5303 __ TailCallRuntime(Runtime::kSubString, 3, 1); 5301 __ TailCallRuntime(Runtime::kSubString, 3, 1);
(...skipping 245 matching lines...)
5549 __ cmp(r6, Operand(2)); 5547 __ cmp(r6, Operand(2));
5550 __ b(ne, &longer_than_two); 5548 __ b(ne, &longer_than_two);
5551 5549
5552 // Check that both strings are non-external ASCII strings. 5550 // Check that both strings are non-external ASCII strings.
5553 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5551 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5554 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5552 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5555 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5553 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5556 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5554 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5557 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5555 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5558 } 5556 }
5559 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, 5557 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
5560 &call_runtime); 5558 &call_runtime);
5561 5559
5562 // Get the two characters forming the sub string. 5560 // Get the two characters forming the sub string.
5563 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); 5561 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
5564 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); 5562 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
5565 5563
5566 // Try to lookup two character string in string table. If it is not found 5564 // Try to lookup two character string in string table. If it is not found
5567 // just allocate a new one. 5565 // just allocate a new one.
5568 Label make_two_character_string; 5566 Label make_two_character_string;
5569 StringHelper::GenerateTwoCharacterStringTableProbe( 5567 StringHelper::GenerateTwoCharacterStringTableProbe(
5570 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); 5568 masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
5571 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5569 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5572 __ add(sp, sp, Operand(2 * kPointerSize)); 5570 __ add(sp, sp, Operand(2 * kPointerSize));
5573 __ Ret(); 5571 __ Ret();
5574 5572
5575 __ bind(&make_two_character_string); 5573 __ bind(&make_two_character_string);
5576 // Resulting string has length 2 and first chars of two strings 5574 // Resulting string has length 2 and first chars of two strings
5577 // are combined into a single halfword in the r2 register. 5575 // are combined into a single halfword in the r2 register.
5578 // So we can fill the resulting string without two loops by a single 5576 // So we can fill the resulting string without two loops by a single
5579 // halfword store instruction (which assumes that the processor is 5577 // halfword store instruction (which assumes that the processor is
5580 // in little-endian mode). 5578 // in little-endian mode).
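[Review note -- illustrative sketch, not part of the patch.] On a little-endian core, packing the two characters as first | (second << 8) and issuing one strh writes them at consecutive addresses in string order:

  #include <cstdint>

  static uint16_t PackTwoAsciiChars(uint8_t first, uint8_t second) {
    // `first` lands at the lower address when the halfword is stored.
    return static_cast<uint16_t>(first | (second << 8));
  }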
(...skipping 24 matching lines...)
5605 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5603 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5606 } 5604 }
5607 Label non_ascii, allocated, ascii_data; 5605 Label non_ascii, allocated, ascii_data;
5608 STATIC_ASSERT(kTwoByteStringTag == 0); 5606 STATIC_ASSERT(kTwoByteStringTag == 0);
5609 __ tst(r4, Operand(kStringEncodingMask)); 5607 __ tst(r4, Operand(kStringEncodingMask));
5610 __ tst(r5, Operand(kStringEncodingMask), ne); 5608 __ tst(r5, Operand(kStringEncodingMask), ne);
5611 __ b(eq, &non_ascii); 5609 __ b(eq, &non_ascii);
5612 5610
5613 // Allocate an ASCII cons string. 5611 // Allocate an ASCII cons string.
5614 __ bind(&ascii_data); 5612 __ bind(&ascii_data);
5615 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); 5613 __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
5616 __ bind(&allocated); 5614 __ bind(&allocated);
5617 // Fill the fields of the cons string. 5615 // Fill the fields of the cons string.
5618 Label skip_write_barrier, after_writing; 5616 Label skip_write_barrier, after_writing;
5619 ExternalReference high_promotion_mode = ExternalReference:: 5617 ExternalReference high_promotion_mode = ExternalReference::
5620 new_space_high_promotion_mode_active_address(masm->isolate()); 5618 new_space_high_promotion_mode_active_address(masm->isolate());
5621 __ mov(r4, Operand(high_promotion_mode)); 5619 __ mov(r4, Operand(high_promotion_mode));
5622 __ ldr(r4, MemOperand(r4, 0)); 5620 __ ldr(r4, MemOperand(r4, 0));
5623 __ cmp(r4, Operand::Zero()); 5621 __ cmp(r4, Operand::Zero());
5624 __ b(eq, &skip_write_barrier); 5622 __ b(eq, &skip_write_barrier);
5625 5623
5626 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5624 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5627 __ RecordWriteField(r7, 5625 __ RecordWriteField(r3,
5628 ConsString::kFirstOffset, 5626 ConsString::kFirstOffset,
5629 r0, 5627 r0,
5630 r4, 5628 r4,
5631 kLRHasNotBeenSaved, 5629 kLRHasNotBeenSaved,
5632 kDontSaveFPRegs); 5630 kDontSaveFPRegs);
5633 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5631 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5634 __ RecordWriteField(r7, 5632 __ RecordWriteField(r3,
5635 ConsString::kSecondOffset, 5633 ConsString::kSecondOffset,
5636 r1, 5634 r1,
5637 r4, 5635 r4,
5638 kLRHasNotBeenSaved, 5636 kLRHasNotBeenSaved,
5639 kDontSaveFPRegs); 5637 kDontSaveFPRegs);
5640 __ jmp(&after_writing); 5638 __ jmp(&after_writing);
5641 5639
5642 __ bind(&skip_write_barrier); 5640 __ bind(&skip_write_barrier);
5643 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); 5641 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
5644 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); 5642 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
5645 5643
5646 __ bind(&after_writing); 5644 __ bind(&after_writing);
5647 5645
5648 __ mov(r0, Operand(r7)); 5646 __ mov(r0, Operand(r3));
5649 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5647 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5650 __ add(sp, sp, Operand(2 * kPointerSize)); 5648 __ add(sp, sp, Operand(2 * kPointerSize));
5651 __ Ret(); 5649 __ Ret();
5652 5650
5653 __ bind(&non_ascii); 5651 __ bind(&non_ascii);
5654 // At least one of the strings is two-byte. Check whether it happens 5652 // At least one of the strings is two-byte. Check whether it happens
5655 // to contain only one-byte characters. 5653 // to contain only one-byte characters.
5656 // r4: first instance type. 5654 // r4: first instance type.
5657 // r5: second instance type. 5655 // r5: second instance type.
5658 __ tst(r4, Operand(kOneByteDataHintMask)); 5656 __ tst(r4, Operand(kOneByteDataHintMask));
5659 __ tst(r5, Operand(kOneByteDataHintMask), ne); 5657 __ tst(r5, Operand(kOneByteDataHintMask), ne);
5660 __ b(ne, &ascii_data); 5658 __ b(ne, &ascii_data);
5661 __ eor(r4, r4, Operand(r5)); 5659 __ eor(r4, r4, Operand(r5));
5662 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); 5660 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
5663 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5661 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5664 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); 5662 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
5665 __ b(eq, &ascii_data); 5663 __ b(eq, &ascii_data);
5666 5664
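[Review note -- illustrative sketch, not part of the patch.] The branch logic above falls through to ascii_data in two cases: both instance types carry the one-byte data hint (the tst / tst-ne / b-ne idiom), or the two types differ in exactly the one-byte tag and hint bits, i.e. one string is one-byte and the other is two-byte but hinted to hold only one-byte data. With the V8 constants passed in as parameters:

  #include <cstdint>

  static bool CanAllocateAsciiCons(uint32_t type1, uint32_t type2,
                                   uint32_t hint_mask, uint32_t tag_and_hint) {
    if ((type1 & hint_mask) != 0 && (type2 & hint_mask) != 0) return true;
    // eor / and_ / cmp: both distinguishing bits must differ.
    return ((type1 ^ type2) & tag_and_hint) == tag_and_hint;
  }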
5667 // Allocate a two byte cons string. 5665 // Allocate a two byte cons string.
5668 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); 5666 __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
5669 __ jmp(&allocated); 5667 __ jmp(&allocated);
5670 5668
5671 // We cannot encounter sliced strings or cons strings here since: 5669 // We cannot encounter sliced strings or cons strings here since:
5672 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); 5670 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5673 // Handle creating a flat result from either external or sequential strings. 5671 // Handle creating a flat result from either external or sequential strings.
5674 // Locate the first characters' locations. 5672 // Locate the first characters' locations.
5675 // r0: first string 5673 // r0: first string
5676 // r1: second string 5674 // r1: second string
5677 // r2: length of first string 5675 // r2: length of first string
5678 // r3: length of second string 5676 // r3: length of second string
5679 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5677 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5680 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) 5678 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
5681 // r6: sum of lengths. 5679 // r6: sum of lengths.
5682 Label first_prepared, second_prepared; 5680 Label first_prepared, second_prepared;
5683 __ bind(&string_add_flat_result); 5681 __ bind(&string_add_flat_result);
5684 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { 5682 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
5685 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); 5683 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
5686 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); 5684 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
5687 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); 5685 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5688 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); 5686 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
5689 } 5687 }
5690 5688
5691 // Check whether both strings have same encoding 5689 // Check whether both strings have same encoding
5692 __ eor(r7, r4, Operand(r5)); 5690 __ eor(ip, r4, Operand(r5));
5693 __ tst(r7, Operand(kStringEncodingMask)); 5691 ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
5692 __ tst(ip, Operand(kStringEncodingMask));
5694 __ b(ne, &call_runtime); 5693 __ b(ne, &call_runtime);
5695 5694
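[Review note -- illustrative sketch, not part of the patch.] The eor/tst pair is an encoding-equality test on the two instance types; the ASSERT added on the right presumably guards that kStringEncodingMask is encodable as an operand2 immediate, so the tst never needs a scratch register to materialise the constant while ip is live:

  #include <cstdint>

  static bool SameEncoding(uint32_t type1, uint32_t type2, uint32_t encoding_mask) {
    // A set bit in the XOR means the encodings differ -> defer to the runtime.
    return ((type1 ^ type2) & encoding_mask) == 0;
  }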
5696 STATIC_ASSERT(kSeqStringTag == 0); 5695 STATIC_ASSERT(kSeqStringTag == 0);
5697 __ tst(r4, Operand(kStringRepresentationMask)); 5696 __ tst(r4, Operand(kStringRepresentationMask));
5698 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5697 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5699 __ add(r7, 5698 __ add(r6,
5700 r0, 5699 r0,
5701 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5700 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5702 LeaveCC, 5701 LeaveCC,
5703 eq); 5702 eq);
5704 __ b(eq, &first_prepared); 5703 __ b(eq, &first_prepared);
5705 // External string: rule out short external string and load string resource. 5704 // External string: rule out short external string and load string resource.
5706 STATIC_ASSERT(kShortExternalStringTag != 0); 5705 STATIC_ASSERT(kShortExternalStringTag != 0);
5707 __ tst(r4, Operand(kShortExternalStringMask)); 5706 __ tst(r4, Operand(kShortExternalStringMask));
5708 __ b(ne, &call_runtime); 5707 __ b(ne, &call_runtime);
5709 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); 5708 __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
5710 __ bind(&first_prepared); 5709 __ bind(&first_prepared);
5711 5710
5712 STATIC_ASSERT(kSeqStringTag == 0); 5711 STATIC_ASSERT(kSeqStringTag == 0);
5713 __ tst(r5, Operand(kStringRepresentationMask)); 5712 __ tst(r5, Operand(kStringRepresentationMask));
5714 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); 5713 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5715 __ add(r1, 5714 __ add(r1,
5716 r1, 5715 r1,
5717 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), 5716 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
5718 LeaveCC, 5717 LeaveCC,
5719 eq); 5718 eq);
5720 __ b(eq, &second_prepared); 5719 __ b(eq, &second_prepared);
5721 // External string: rule out short external string and load string resource. 5720 // External string: rule out short external string and load string resource.
5722 STATIC_ASSERT(kShortExternalStringTag != 0); 5721 STATIC_ASSERT(kShortExternalStringTag != 0);
5723 __ tst(r5, Operand(kShortExternalStringMask)); 5722 __ tst(r5, Operand(kShortExternalStringMask));
5724 __ b(ne, &call_runtime); 5723 __ b(ne, &call_runtime);
5725 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); 5724 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
5726 __ bind(&second_prepared); 5725 __ bind(&second_prepared);
5727 5726
5728 Label non_ascii_string_add_flat_result; 5727 Label non_ascii_string_add_flat_result;
5729 // r7: first character of first string 5728 // r6: first character of first string
5730 // r1: first character of second string 5729 // r1: first character of second string
5731 // r2: length of first string. 5730 // r2: length of first string.
5732 // r3: length of second string. 5731 // r3: length of second string.
5733 // r6: sum of lengths.
5734 // Both strings have the same encoding. 5732 // Both strings have the same encoding.
5735 STATIC_ASSERT(kTwoByteStringTag == 0); 5733 STATIC_ASSERT(kTwoByteStringTag == 0);
5736 __ tst(r5, Operand(kStringEncodingMask)); 5734 __ tst(r5, Operand(kStringEncodingMask));
5737 __ b(eq, &non_ascii_string_add_flat_result); 5735 __ b(eq, &non_ascii_string_add_flat_result);
5738 5736
5739 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); 5737 __ add(r2, r2, Operand(r3));
5740 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); 5738 __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
5739 __ sub(r2, r2, Operand(r3));
5740 __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
5741 // r0: result string. 5741 // r0: result string.
5742 // r7: first character of first string. 5742 // r6: first character of first string.
5743 // r1: first character of second string. 5743 // r1: first character of second string.
5744 // r2: length of first string. 5744 // r2: length of first string.
5745 // r3: length of second string. 5745 // r3: length of second string.
5746 // r6: first character of result. 5746 // r5: first character of result.
5747 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); 5747 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
5748 // r6: next character of result. 5748 // r5: next character of result.
5749 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); 5749 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
5750 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5750 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5751 __ add(sp, sp, Operand(2 * kPointerSize)); 5751 __ add(sp, sp, Operand(2 * kPointerSize));
5752 __ Ret(); 5752 __ Ret();
5753 5753
5754 __ bind(&non_ascii_string_add_flat_result); 5754 __ bind(&non_ascii_string_add_flat_result);
5755 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); 5755 __ add(r2, r2, Operand(r3));
5756 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); 5756 __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
5757 __ sub(r2, r2, Operand(r3));
5758 __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5757 // r0: result string. 5759 // r0: result string.
5758 // r7: first character of first string. 5760 // r6: first character of first string.
5759 // r1: first character of second string. 5761 // r1: first character of second string.
5760 // r2: length of first string. 5762 // r2: length of first string.
5761 // r3: length of second string. 5763 // r3: length of second string.
5762 // r6: first character of result. 5764 // r5: first character of result.
5763 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); 5765 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
5764 // r6: next character of result. 5766 // r5: next character of result.
5765 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); 5767 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
5766 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); 5768 __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
5767 __ add(sp, sp, Operand(2 * kPointerSize)); 5769 __ add(sp, sp, Operand(2 * kPointerSize));
5768 __ Ret(); 5770 __ Ret();
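[Review note -- illustrative sketch, not part of the patch.] In both flat-result paths above, the right-hand side now forms the total length in r2 only long enough to size the allocation, then subtracts r3 back so r2 again holds the first string's length for the copy; a hypothetical model:

  static unsigned AllocSizeThenRestore(unsigned* len_first, unsigned len_second) {
    unsigned total = *len_first + len_second;  // add r2, r2, Operand(r3)
    // ... the allocation is sized from `total` ...
    *len_first = total - len_second;           // sub r2, r2, Operand(r3)
    return total;
  }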
5769 5771
5770 // Just jump to runtime to add the two strings. 5772 // Just jump to runtime to add the two strings.
5771 __ bind(&call_runtime); 5773 __ bind(&call_runtime);
5772 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { 5774 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
5773 GenerateRegisterArgsPop(masm); 5775 GenerateRegisterArgsPop(masm);
5774 // Build a frame 5776 // Build a frame
5775 { 5777 {
(...skipping 680 matching lines...)
6456 struct AheadOfTimeWriteBarrierStubList { 6458 struct AheadOfTimeWriteBarrierStubList {
6457 Register object, value, address; 6459 Register object, value, address;
6458 RememberedSetAction action; 6460 RememberedSetAction action;
6459 }; 6461 };
6460 6462
6461 6463
6462 #define REG(Name) { kRegister_ ## Name ## _Code } 6464 #define REG(Name) { kRegister_ ## Name ## _Code }
6463 6465
6464 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { 6466 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6465 // Used in RegExpExecStub. 6467 // Used in RegExpExecStub.
6466 { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, 6468 { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
6467 // Used in CompileArrayPushCall. 6469 // Used in CompileArrayPushCall.
6468 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. 6470 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
6469 // Also used in KeyedStoreIC::GenerateGeneric. 6471 // Also used in KeyedStoreIC::GenerateGeneric.
6470 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, 6472 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
6471 // Used in CompileStoreGlobal. 6473 // Used in CompileStoreGlobal.
6472 { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET }, 6474 { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
6473 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. 6475 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
6474 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, 6476 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
6475 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, 6477 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
6476 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 6478 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
6477 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, 6479 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
6478 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, 6480 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
6479 // KeyedStoreStubCompiler::GenerateStoreFastElement. 6481 // KeyedStoreStubCompiler::GenerateStoreFastElement.
6480 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, 6482 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
6481 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, 6483 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
6482 // ElementsTransitionGenerator::GenerateMapChangeElementTransition 6484 // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6483 // and ElementsTransitionGenerator::GenerateSmiToDouble 6485 // and ElementsTransitionGenerator::GenerateSmiToDouble
6484 // and ElementsTransitionGenerator::GenerateDoubleToObject 6486 // and ElementsTransitionGenerator::GenerateDoubleToObject
6485 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, 6487 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
6486 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, 6488 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
6487 // ElementsTransitionGenerator::GenerateDoubleToObject 6489 // ElementsTransitionGenerator::GenerateDoubleToObject
6488 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, 6490 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
6489 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, 6491 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
6490 // StoreArrayLiteralElementStub::Generate 6492 // StoreArrayLiteralElementStub::Generate
6491 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, 6493 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
6492 // FastNewClosureStub::Generate 6494 // FastNewClosureStub::Generate
6493 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, 6495 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
6494 // StringAddStub::Generate 6496 // StringAddStub::Generate
6495 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, 6497 { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
6496 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, 6498 { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
6497 // Null termination. 6499 // Null termination.
6498 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} 6500 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6499 }; 6501 };
6500 6502
6501 #undef REG 6503 #undef REG
6502 6504
6503 6505
6504 bool RecordWriteStub::IsPregenerated() { 6506 bool RecordWriteStub::IsPregenerated() {
6505 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; 6507 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6506 !entry->object.is(no_reg); 6508 !entry->object.is(no_reg);
(...skipping 668 matching lines...)
7175 __ bind(&fast_elements_case); 7177 __ bind(&fast_elements_case);
7176 GenerateCase(masm, FAST_ELEMENTS); 7178 GenerateCase(masm, FAST_ELEMENTS);
7177 } 7179 }
7178 7180
7179 7181
7180 #undef __ 7182 #undef __
7181 7183
7182 } } // namespace v8::internal 7184 } } // namespace v8::internal
7183 7185
7184 #endif // V8_TARGET_ARCH_ARM 7186 #endif // V8_TARGET_ARCH_ARM