| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 484 matching lines...) |
| 495 Register scratch1, | 495 Register scratch1, |
| 496 Register scratch2, | 496 Register scratch2, |
| 497 Label* not_number); | 497 Label* not_number); |
| 498 }; | 498 }; |
| 499 | 499 |
| 500 | 500 |
| 501 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 501 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
| 502 FloatingPointHelper::Destination destination, | 502 FloatingPointHelper::Destination destination, |
| 503 Register scratch1, | 503 Register scratch1, |
| 504 Register scratch2) { | 504 Register scratch2) { |
| 505 if (CpuFeatures::IsSupported(VFP3)) { | 505 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 506 CpuFeatures::Scope scope(VFP3); | 506 CpuFeatures::Scope scope(VFP3); |
| 507 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | 507 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
| 508 __ vmov(d7.high(), scratch1); | 508 __ vmov(d7.high(), scratch1); |
| 509 __ vcvt_f64_s32(d7, d7.high()); | 509 __ vcvt_f64_s32(d7, d7.high()); |
| 510 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | 510 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); |
| 511 __ vmov(d6.high(), scratch1); | 511 __ vmov(d6.high(), scratch1); |
| 512 __ vcvt_f64_s32(d6, d6.high()); | 512 __ vcvt_f64_s32(d6, d6.high()); |
| 513 if (destination == kCoreRegisters) { | 513 if (destination == kCoreRegisters) { |
| 514 __ vmov(r2, r3, d7); | 514 __ vmov(r2, r3, d7); |
| 515 __ vmov(r0, r1, d6); | 515 __ vmov(r0, r1, d6); |
| (...skipping 47 matching lines...) |
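The substantive change in the hunk above is the pattern repeated throughout this CL: the static CpuFeatures::IsSupported(VFP3) query becomes a lookup through the current isolate. A minimal sketch of the new calling pattern, using only the accessors visible in this diff:

    // Per-isolate CPU feature check introduced by this CL (sketch).
    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);  // allow the assembler to emit VFP3 code
      // ... emit the VFP path, e.g. __ vmov / __ vcvt_f64_s32 as in LoadSmis ...
    } else {
      // ... fall back to the core-register path ...
    }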
| 563 Heap::kHeapNumberMapRootIndex, | 563 Heap::kHeapNumberMapRootIndex, |
| 564 "HeapNumberMap register clobbered."); | 564 "HeapNumberMap register clobbered."); |
| 565 } | 565 } |
| 566 | 566 |
| 567 Label is_smi, done; | 567 Label is_smi, done; |
| 568 | 568 |
| 569 __ JumpIfSmi(object, &is_smi); | 569 __ JumpIfSmi(object, &is_smi); |
| 570 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 570 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 571 | 571 |
| 572 // Handle loading a double from a heap number. | 572 // Handle loading a double from a heap number. |
| 573 if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) { | 573 if (Isolate::Current()->cpu_features()->IsSupported(VFP3) && |
| | 574 destination == kVFPRegisters) { |
| 574 CpuFeatures::Scope scope(VFP3); | 575 CpuFeatures::Scope scope(VFP3); |
| 575 // Load the double from tagged HeapNumber to double register. | 576 // Load the double from tagged HeapNumber to double register. |
| 576 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 577 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 577 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 578 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
| 578 } else { | 579 } else { |
| 579 ASSERT(destination == kCoreRegisters); | 580 ASSERT(destination == kCoreRegisters); |
| 580 // Load the double from heap number to dst1 and dst2 in double format. | 581 // Load the double from heap number to dst1 and dst2 in double format. |
| 581 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 582 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 582 } | 583 } |
| 583 __ jmp(&done); | 584 __ jmp(&done); |
| 584 | 585 |
| 585 // Handle loading a double from a smi. | 586 // Handle loading a double from a smi. |
| 586 __ bind(&is_smi); | 587 __ bind(&is_smi); |
| 587 if (CpuFeatures::IsSupported(VFP3)) { | 588 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 588 CpuFeatures::Scope scope(VFP3); | 589 CpuFeatures::Scope scope(VFP3); |
| 589 // Convert smi to double using VFP instructions. | 590 // Convert smi to double using VFP instructions. |
| 590 __ SmiUntag(scratch1, object); | 591 __ SmiUntag(scratch1, object); |
| 591 __ vmov(dst.high(), scratch1); | 592 __ vmov(dst.high(), scratch1); |
| 592 __ vcvt_f64_s32(dst, dst.high()); | 593 __ vcvt_f64_s32(dst, dst.high()); |
| 593 if (destination == kCoreRegisters) { | 594 if (destination == kCoreRegisters) { |
| 594 // Load the converted smi to dst1 and dst2 in double format. | 595 // Load the converted smi to dst1 and dst2 in double format. |
| 595 __ vmov(dst1, dst2, dst); | 596 __ vmov(dst1, dst2, dst); |
| 596 } | 597 } |
| 597 } else { | 598 } else { |
| (...skipping 70 matching lines...) |
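In the hunk above, the sub(scratch1, object, Operand(kHeapObjectTag)) before the vldr strips the tag that every heap object pointer carries, so the load addresses the raw HeapNumber value field; FieldMemOperand performs the same adjustment implicitly on the core-register path. A rough C-level picture of that address arithmetic (the helper is illustrative, not from this file):

    // Untag a HeapObject pointer and address one of its fields (illustrative).
    uint8_t* FieldAddress(uintptr_t tagged_ptr, int byte_offset) {
      // tagged_ptr carries kHeapObjectTag in its low bits; strip it, then add
      // the field offset (e.g. HeapNumber::kValueOffset for the double value).
      return reinterpret_cast<uint8_t*>(tagged_ptr - kHeapObjectTag + byte_offset);
    }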
| 668 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 669 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
| 669 ASSERT(!scratch1.is(scratch2)); | 670 ASSERT(!scratch1.is(scratch2)); |
| 670 ASSERT(!heap_number_map.is(object) && | 671 ASSERT(!heap_number_map.is(object) && |
| 671 !heap_number_map.is(scratch1) && | 672 !heap_number_map.is(scratch1) && |
| 672 !heap_number_map.is(scratch2)); | 673 !heap_number_map.is(scratch2)); |
| 673 | 674 |
| 674 Label done, obj_is_not_smi; | 675 Label done, obj_is_not_smi; |
| 675 | 676 |
| 676 __ JumpIfNotSmi(object, &obj_is_not_smi); | 677 __ JumpIfNotSmi(object, &obj_is_not_smi); |
| 677 __ SmiUntag(scratch1, object); | 678 __ SmiUntag(scratch1, object); |
| 678 if (CpuFeatures::IsSupported(VFP3)) { | 679 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 679 CpuFeatures::Scope scope(VFP3); | 680 CpuFeatures::Scope scope(VFP3); |
| 680 __ vmov(single_scratch, scratch1); | 681 __ vmov(single_scratch, scratch1); |
| 681 __ vcvt_f64_s32(double_dst, single_scratch); | 682 __ vcvt_f64_s32(double_dst, single_scratch); |
| 682 if (destination == kCoreRegisters) { | 683 if (destination == kCoreRegisters) { |
| 683 __ vmov(dst1, dst2, double_dst); | 684 __ vmov(dst1, dst2, double_dst); |
| 684 } | 685 } |
| 685 } else { | 686 } else { |
| 686 Label fewer_than_20_useful_bits; | 687 Label fewer_than_20_useful_bits; |
| 687 // Expected output: | 688 // Expected output: |
| 688 // | dst1 | dst2 | | 689 // | dst1 | dst2 | |
| (...skipping 47 matching lines...) |
| 736 | 737 |
| 737 __ bind(&obj_is_not_smi); | 738 __ bind(&obj_is_not_smi); |
| 738 if (FLAG_debug_code) { | 739 if (FLAG_debug_code) { |
| 739 __ AbortIfNotRootValue(heap_number_map, | 740 __ AbortIfNotRootValue(heap_number_map, |
| 740 Heap::kHeapNumberMapRootIndex, | 741 Heap::kHeapNumberMapRootIndex, |
| 741 "HeapNumberMap register clobbered."); | 742 "HeapNumberMap register clobbered."); |
| 742 } | 743 } |
| 743 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 744 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 744 | 745 |
| 745 // Load the number. | 746 // Load the number. |
| 746 if (CpuFeatures::IsSupported(VFP3)) { | 747 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 747 CpuFeatures::Scope scope(VFP3); | 748 CpuFeatures::Scope scope(VFP3); |
| 748 // Load the double value. | 749 // Load the double value. |
| 749 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 750 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 750 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | 751 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); |
| 751 | 752 |
| 752 __ EmitVFPTruncate(kRoundToZero, | 753 __ EmitVFPTruncate(kRoundToZero, |
| 753 single_scratch, | 754 single_scratch, |
| 754 double_dst, | 755 double_dst, |
| 755 scratch1, | 756 scratch1, |
| 756 scratch2, | 757 scratch2, |
| (...skipping 53 matching lines...) |
| 810 | 811 |
| 811 if (FLAG_debug_code) { | 812 if (FLAG_debug_code) { |
| 812 __ AbortIfNotRootValue(heap_number_map, | 813 __ AbortIfNotRootValue(heap_number_map, |
| 813 Heap::kHeapNumberMapRootIndex, | 814 Heap::kHeapNumberMapRootIndex, |
| 814 "HeapNumberMap register clobbered."); | 815 "HeapNumberMap register clobbered."); |
| 815 } | 816 } |
| 816 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 817 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 817 | 818 |
| 818 // Object is a heap number. | 819 // Object is a heap number. |
| 819 // Convert the floating point value to a 32-bit integer. | 820 // Convert the floating point value to a 32-bit integer. |
| 820 if (CpuFeatures::IsSupported(VFP3)) { | 821 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 821 CpuFeatures::Scope scope(VFP3); | 822 CpuFeatures::Scope scope(VFP3); |
| 822 SwVfpRegister single_scratch = double_scratch.low(); | 823 SwVfpRegister single_scratch = double_scratch.low(); |
| 823 // Load the double value. | 824 // Load the double value. |
| 824 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 825 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 825 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); | 826 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); |
| 826 | 827 |
| 827 __ EmitVFPTruncate(kRoundToZero, | 828 __ EmitVFPTruncate(kRoundToZero, |
| 828 single_scratch, | 829 single_scratch, |
| 829 double_scratch, | 830 double_scratch, |
| 830 scratch1, | 831 scratch1, |
| (...skipping 190 matching lines...) |
| 1021 Condition cond, | 1022 Condition cond, |
| 1022 bool never_nan_nan) { | 1023 bool never_nan_nan) { |
| 1023 Label not_identical; | 1024 Label not_identical; |
| 1024 Label heap_number, return_equal; | 1025 Label heap_number, return_equal; |
| 1025 __ cmp(r0, r1); | 1026 __ cmp(r0, r1); |
| 1026 __ b(ne, &not_identical); | 1027 __ b(ne, &not_identical); |
| 1027 | 1028 |
| 1028 // The two objects are identical. If we know that one of them isn't NaN then | 1029 // The two objects are identical. If we know that one of them isn't NaN then |
| 1029 // we now know they test equal. | 1030 // we now know they test equal. |
| 1030 if (cond != eq || !never_nan_nan) { | 1031 if (cond != eq || !never_nan_nan) { |
| 1031 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 1032 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), |
| 1032 // so we do the second best thing - test it ourselves. | 1033 // so we do the second best thing - test it ourselves. |
| 1033 // They are both equal and they are not both Smis so both of them are not | 1034 // They are both equal and they are not both Smis so both of them are not |
| 1034 // Smis. If it's not a heap number, then return equal. | 1035 // Smis. If it's not a heap number, then return equal. |
| 1035 if (cond == lt || cond == gt) { | 1036 if (cond == lt || cond == gt) { |
| 1036 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); | 1037 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); |
| 1037 __ b(ge, slow); | 1038 __ b(ge, slow); |
| 1038 } else { | 1039 } else { |
| 1039 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 1040 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
| 1040 __ b(eq, &heap_number); | 1041 __ b(eq, &heap_number); |
| 1041 // Comparing JS objects with <=, >= is complicated. | 1042 // Comparing JS objects with <=, >= is complicated. |
| (...skipping 102 matching lines...) |
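The reason identical operands still need the heap-number check above: when both are the same NaN, equality must still report not-equal, and as the comment notes there is no canonical nan_value to compare against. In IEEE 754 terms, the check the stub ultimately needs amounts to something like the following (illustrative helper, not from this file):

    // A double is NaN iff all exponent bits are set and the mantissa is non-zero.
    bool IsNaNBits(uint64_t bits) {
      const uint64_t kExponentMask = 0x7FF0000000000000ULL;
      const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;
      return (bits & kExponentMask) == kExponentMask && (bits & kMantissaMask) != 0;
    }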
| 1144 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1145 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| 1145 } | 1146 } |
| 1146 __ Ret(ne); | 1147 __ Ret(ne); |
| 1147 } else { | 1148 } else { |
| 1148 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1149 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 1149 // the runtime. | 1150 // the runtime. |
| 1150 __ b(ne, slow); | 1151 __ b(ne, slow); |
| 1151 } | 1152 } |
| 1152 | 1153 |
| 1153 // Lhs is a smi, rhs is a number. | 1154 // Lhs is a smi, rhs is a number. |
| 1154 if (CpuFeatures::IsSupported(VFP3)) { | 1155 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1155 // Convert lhs to a double in d7. | 1156 // Convert lhs to a double in d7. |
| 1156 CpuFeatures::Scope scope(VFP3); | 1157 CpuFeatures::Scope scope(VFP3); |
| 1157 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 1158 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); |
| 1158 // Load the double from rhs, tagged HeapNumber r0, to d6. | 1159 // Load the double from rhs, tagged HeapNumber r0, to d6. |
| 1159 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1160 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| 1160 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1161 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 1161 } else { | 1162 } else { |
| 1162 __ push(lr); | 1163 __ push(lr); |
| 1163 // Convert lhs to a double in r2, r3. | 1164 // Convert lhs to a double in r2, r3. |
| 1164 __ mov(r7, Operand(lhs)); | 1165 __ mov(r7, Operand(lhs)); |
| (...skipping 19 matching lines...) |
| 1184 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1185 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| 1185 } | 1186 } |
| 1186 __ Ret(ne); | 1187 __ Ret(ne); |
| 1187 } else { | 1188 } else { |
| 1188 // Smi compared non-strictly with a non-smi non-heap-number. Call | 1189 // Smi compared non-strictly with a non-smi non-heap-number. Call |
| 1189 // the runtime. | 1190 // the runtime. |
| 1190 __ b(ne, slow); | 1191 __ b(ne, slow); |
| 1191 } | 1192 } |
| 1192 | 1193 |
| 1193 // Rhs is a smi, lhs is a heap number. | 1194 // Rhs is a smi, lhs is a heap number. |
| 1194 if (CpuFeatures::IsSupported(VFP3)) { | 1195 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1195 CpuFeatures::Scope scope(VFP3); | 1196 CpuFeatures::Scope scope(VFP3); |
| 1196 // Load the double from lhs, tagged HeapNumber r1, to d7. | 1197 // Load the double from lhs, tagged HeapNumber r1, to d7. |
| 1197 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1198 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| 1198 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1199 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 1199 // Convert rhs to a double in d6 . | 1200 // Convert rhs to a double in d6 . |
| 1200 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | 1201 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); |
| 1201 } else { | 1202 } else { |
| 1202 __ push(lr); | 1203 __ push(lr); |
| 1203 // Load lhs to a double in r2, r3. | 1204 // Load lhs to a double in r2, r3. |
| 1204 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1205 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| (...skipping 159 matching lines...) |
| 1364 (lhs.is(r1) && rhs.is(r0))); | 1365 (lhs.is(r1) && rhs.is(r0))); |
| 1365 | 1366 |
| 1366 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 1367 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
| 1367 __ b(ne, not_heap_numbers); | 1368 __ b(ne, not_heap_numbers); |
| 1368 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 1369 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 1369 __ cmp(r2, r3); | 1370 __ cmp(r2, r3); |
| 1370 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 1371 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
| 1371 | 1372 |
| 1372 // Both are heap numbers. Load them up then jump to the code we have | 1373 // Both are heap numbers. Load them up then jump to the code we have |
| 1373 // for that. | 1374 // for that. |
| 1374 if (CpuFeatures::IsSupported(VFP3)) { | 1375 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1375 CpuFeatures::Scope scope(VFP3); | 1376 CpuFeatures::Scope scope(VFP3); |
| 1376 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1377 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| 1377 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1378 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 1378 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1379 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| 1379 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1380 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 1380 } else { | 1381 } else { |
| 1381 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1382 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 1382 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1383 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1383 } | 1384 } |
| 1384 __ jmp(both_loaded_as_doubles); | 1385 __ jmp(both_loaded_as_doubles); |
| (...skipping 68 matching lines...) |
| 1453 __ sub(mask, mask, Operand(1)); // Make mask. | 1454 __ sub(mask, mask, Operand(1)); // Make mask. |
| 1454 | 1455 |
| 1455 // Calculate the entry in the number string cache. The hash value in the | 1456 // Calculate the entry in the number string cache. The hash value in the |
| 1456 // number string cache for smis is just the smi value, and the hash for | 1457 // number string cache for smis is just the smi value, and the hash for |
| 1457 // doubles is the xor of the upper and lower words. See | 1458 // doubles is the xor of the upper and lower words. See |
| 1458 // Heap::GetNumberStringCache. | 1459 // Heap::GetNumberStringCache. |
| 1459 Label is_smi; | 1460 Label is_smi; |
| 1460 Label load_result_from_cache; | 1461 Label load_result_from_cache; |
| 1461 if (!object_is_smi) { | 1462 if (!object_is_smi) { |
| 1462 __ JumpIfSmi(object, &is_smi); | 1463 __ JumpIfSmi(object, &is_smi); |
| 1463 if (CpuFeatures::IsSupported(VFP3)) { | 1464 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1464 CpuFeatures::Scope scope(VFP3); | 1465 CpuFeatures::Scope scope(VFP3); |
| 1465 __ CheckMap(object, | 1466 __ CheckMap(object, |
| 1466 scratch1, | 1467 scratch1, |
| 1467 Heap::kHeapNumberMapRootIndex, | 1468 Heap::kHeapNumberMapRootIndex, |
| 1468 not_found, | 1469 not_found, |
| 1469 true); | 1470 true); |
| 1470 | 1471 |
| 1471 STATIC_ASSERT(8 == kDoubleSize); | 1472 STATIC_ASSERT(8 == kDoubleSize); |
| 1472 __ add(scratch1, | 1473 __ add(scratch1, |
| 1473 object, | 1474 object, |
| (...skipping 36 matching lines...) |
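As the comment at old lines 1455-1458 above explains, the probe index into the number string cache is the smi value itself for smi keys, and the xor of the double's two 32-bit words for heap-number keys, masked down to the cache's index range. A compact restatement (helper name illustrative):

    // Cache index for a heap-number key; mask is the value computed above.
    uint32_t NumberStringCacheIndex(uint32_t lo, uint32_t hi, uint32_t mask) {
      return (lo ^ hi) & mask;  // for a smi key, use its untagged value & mask
    }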
| 1510 // Check if the entry is the smi we are looking for. | 1511 // Check if the entry is the smi we are looking for. |
| 1511 Register probe = mask; | 1512 Register probe = mask; |
| 1512 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 1513 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| 1513 __ cmp(object, probe); | 1514 __ cmp(object, probe); |
| 1514 __ b(ne, not_found); | 1515 __ b(ne, not_found); |
| 1515 | 1516 |
| 1516 // Get the result from the cache. | 1517 // Get the result from the cache. |
| 1517 __ bind(&load_result_from_cache); | 1518 __ bind(&load_result_from_cache); |
| 1518 __ ldr(result, | 1519 __ ldr(result, |
| 1519 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | 1520 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
| 1520 __ IncrementCounter(&Counters::number_to_string_native, | 1521 __ IncrementCounter(COUNTERS->number_to_string_native(), |
| 1521 1, | 1522 1, |
| 1522 scratch1, | 1523 scratch1, |
| 1523 scratch2); | 1524 scratch2); |
| 1524 } | 1525 } |
| 1525 | 1526 |
| 1526 | 1527 |
| 1527 void NumberToStringStub::Generate(MacroAssembler* masm) { | 1528 void NumberToStringStub::Generate(MacroAssembler* masm) { |
| 1528 Label runtime; | 1529 Label runtime; |
| 1529 | 1530 |
| 1530 __ ldr(r1, MemOperand(sp, 0)); | 1531 __ ldr(r1, MemOperand(sp, 0)); |
| (...skipping 55 matching lines...) |
| 1586 // 4) Jump to lhs_not_nan. | 1587 // 4) Jump to lhs_not_nan. |
| 1587 // In cases 3 and 4 we have found out we were dealing with a number-number | 1588 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1588 // comparison. If VFP3 is supported the double values of the numbers have | 1589 // comparison. If VFP3 is supported the double values of the numbers have |
| 1589 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 1590 // been loaded into d7 and d6. Otherwise, the double values have been loaded |
| 1590 // into r0, r1, r2, and r3. | 1591 // into r0, r1, r2, and r3. |
| 1591 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | 1592 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); |
| 1592 | 1593 |
| 1593 __ bind(&both_loaded_as_doubles); | 1594 __ bind(&both_loaded_as_doubles); |
| 1594 // The arguments have been converted to doubles and stored in d6 and d7, if | 1595 // The arguments have been converted to doubles and stored in d6 and d7, if |
| 1595 // VFP3 is supported, or in r0, r1, r2, and r3. | 1596 // VFP3 is supported, or in r0, r1, r2, and r3. |
| 1596 if (CpuFeatures::IsSupported(VFP3)) { | 1597 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1597 __ bind(&lhs_not_nan); | 1598 __ bind(&lhs_not_nan); |
| 1598 CpuFeatures::Scope scope(VFP3); | 1599 CpuFeatures::Scope scope(VFP3); |
| 1599 Label no_nan; | 1600 Label no_nan; |
| 1600 // ARMv7 VFP3 instructions to implement double precision comparison. | 1601 // ARMv7 VFP3 instructions to implement double precision comparison. |
| 1601 __ VFPCompareAndSetFlags(d7, d6); | 1602 __ VFPCompareAndSetFlags(d7, d6); |
| 1602 Label nan; | 1603 Label nan; |
| 1603 __ b(vs, &nan); | 1604 __ b(vs, &nan); |
| 1604 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1605 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 1605 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1606 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 1606 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 1607 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| (...skipping 49 matching lines...) |
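The three conditional mov instructions above encode the compare stub's return protocol: r0 receives EQUAL (0), LESS (-1) or GREATER (1), and the vs branch diverts to the nan label because an unordered VFP comparison sets the overflow flag. Roughly, in C++ terms (illustrative; the value produced for NaN depends on the condition being compiled):

    // Mapping from a double comparison to the stub's return value (sketch).
    int CompareDoubles(double lhs, double rhs) {
      if (lhs != lhs || rhs != rhs) return 1;  // NaN: actual result depends on cond
      if (lhs == rhs) return 0;                // EQUAL
      return lhs < rhs ? -1 : 1;               // LESS : GREATER
    }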
| 1656 // Assumes that r2 is the type of rhs_ on entry. | 1657 // Assumes that r2 is the type of rhs_ on entry. |
| 1657 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); | 1658 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); |
| 1658 } | 1659 } |
| 1659 | 1660 |
| 1660 // Check for both being sequential ASCII strings, and inline if that is the | 1661 // Check for both being sequential ASCII strings, and inline if that is the |
| 1661 // case. | 1662 // case. |
| 1662 __ bind(&flat_string_check); | 1663 __ bind(&flat_string_check); |
| 1663 | 1664 |
| 1664 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); | 1665 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); |
| 1665 | 1666 |
| 1666 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | 1667 __ IncrementCounter(COUNTERS->string_compare_native(), 1, r2, r3); |
| 1667 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | 1668 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
| 1668 lhs_, | 1669 lhs_, |
| 1669 rhs_, | 1670 rhs_, |
| 1670 r2, | 1671 r2, |
| 1671 r3, | 1672 r3, |
| 1672 r4, | 1673 r4, |
| 1673 r5); | 1674 r5); |
| 1674 // Never falls through to here. | 1675 // Never falls through to here. |
| 1675 | 1676 |
| 1676 __ bind(&slow); | 1677 __ bind(&slow); |
| (...skipping 19 matching lines...) |
| 1696 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1697 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1697 // tagged as a small integer. | 1698 // tagged as a small integer. |
| 1698 __ InvokeBuiltin(native, JUMP_JS); | 1699 __ InvokeBuiltin(native, JUMP_JS); |
| 1699 } | 1700 } |
| 1700 | 1701 |
| 1701 | 1702 |
| 1702 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | 1703 // This stub does not handle the inlined cases (Smis, Booleans, undefined). |
| 1703 // The stub returns zero for false, and a non-zero value for true. | 1704 // The stub returns zero for false, and a non-zero value for true. |
| 1704 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1705 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 1705 // This stub uses VFP3 instructions. | 1706 // This stub uses VFP3 instructions. |
| 1706 ASSERT(CpuFeatures::IsEnabled(VFP3)); | 1707 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); |
| 1707 | 1708 |
| 1708 Label false_result; | 1709 Label false_result; |
| 1709 Label not_heap_number; | 1710 Label not_heap_number; |
| 1710 Register scratch = r9.is(tos_) ? r7 : r9; | 1711 Register scratch = r9.is(tos_) ? r7 : r9; |
| 1711 | 1712 |
| 1712 __ LoadRoot(ip, Heap::kNullValueRootIndex); | 1713 __ LoadRoot(ip, Heap::kNullValueRootIndex); |
| 1713 __ cmp(tos_, ip); | 1714 __ cmp(tos_, ip); |
| 1714 __ b(eq, &false_result); | 1715 __ b(eq, &false_result); |
| 1715 | 1716 |
| 1716 // HeapNumber => false iff +0, -0, or NaN. | 1717 // HeapNumber => false iff +0, -0, or NaN. |
| (...skipping 65 matching lines...) |
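The rule stated above, HeapNumber => false iff +0, -0, or NaN, is the ECMAScript ToBoolean conversion for numbers. As a one-line illustration:

    // ToBoolean for a double value, per the comment above (illustrative).
    bool ToBooleanOfDouble(double value) {
      return !(value == 0.0 || value != value);  // 0.0 matches +0 and -0; NaN != NaN
    }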
| 1782 // to call the C-implemented binary fp operation routines we need to end up | 1783 // to call the C-implemented binary fp operation routines we need to end up |
| 1783 // with the double precision floating point operands in r0 and r1 (for the | 1784 // with the double precision floating point operands in r0 and r1 (for the |
| 1784 // value in r1) and r2 and r3 (for the value in r0). | 1785 // value in r1) and r2 and r3 (for the value in r0). |
| 1785 void GenericBinaryOpStub::HandleBinaryOpSlowCases( | 1786 void GenericBinaryOpStub::HandleBinaryOpSlowCases( |
| 1786 MacroAssembler* masm, | 1787 MacroAssembler* masm, |
| 1787 Label* not_smi, | 1788 Label* not_smi, |
| 1788 Register lhs, | 1789 Register lhs, |
| 1789 Register rhs, | 1790 Register rhs, |
| 1790 const Builtins::JavaScript& builtin) { | 1791 const Builtins::JavaScript& builtin) { |
| 1791 Label slow, slow_reverse, do_the_call; | 1792 Label slow, slow_reverse, do_the_call; |
| 1792 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; | 1793 bool use_fp_registers = |
| | 1794 Isolate::Current()->cpu_features()->IsSupported(VFP3) && |
| | 1795 Token::MOD != op_; |
| 1793 | 1796 |
| 1794 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); | 1797 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); |
| 1795 Register heap_number_map = r6; | 1798 Register heap_number_map = r6; |
| 1796 | 1799 |
| 1797 if (ShouldGenerateSmiCode()) { | 1800 if (ShouldGenerateSmiCode()) { |
| 1798 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1801 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1799 | 1802 |
| 1800 // Smi-smi case (overflow). | 1803 // Smi-smi case (overflow). |
| 1801 // Since both are Smis there is no heap number to overwrite, so allocate. | 1804 // Since both are Smis there is no heap number to overwrite, so allocate. |
| 1802 // The new heap number is in r5. r3 and r7 are scratch. | 1805 // The new heap number is in r5. r3 and r7 are scratch. |
| 1803 __ AllocateHeapNumber( | 1806 __ AllocateHeapNumber( |
| 1804 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); | 1807 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); |
| 1805 | 1808 |
| 1806 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 1809 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, |
| 1807 // using registers d7 and d6 for the double values. | 1810 // using registers d7 and d6 for the double values. |
| 1808 if (CpuFeatures::IsSupported(VFP3)) { | 1811 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1809 CpuFeatures::Scope scope(VFP3); | 1812 CpuFeatures::Scope scope(VFP3); |
| 1810 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); | 1813 __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); |
| 1811 __ vmov(s15, r7); | 1814 __ vmov(s15, r7); |
| 1812 __ vcvt_f64_s32(d7, s15); | 1815 __ vcvt_f64_s32(d7, s15); |
| 1813 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); | 1816 __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); |
| 1814 __ vmov(s13, r7); | 1817 __ vmov(s13, r7); |
| 1815 __ vcvt_f64_s32(d6, s13); | 1818 __ vcvt_f64_s32(d6, s13); |
| 1816 if (!use_fp_registers) { | 1819 if (!use_fp_registers) { |
| 1817 __ vmov(r2, r3, d7); | 1820 __ vmov(r2, r3, d7); |
| 1818 __ vmov(r0, r1, d6); | 1821 __ vmov(r0, r1, d6); |
| (...skipping 75 matching lines...) |
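The comment at the top of HandleBinaryOpSlowCases spells out why the non-VFP path shuffles values into r0/r1 and r2/r3: under the soft-float calling convention the C-implemented double-precision routines take each operand in a pair of core registers. Schematically (function name illustrative):

    // With a soft-float ABI this call receives 'left' in r0/r1 and 'right' in
    // r2/r3, which is the register layout the stub prepares before calling out.
    extern "C" double fp_binary_op(double left, double right);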
| 1894 // Calling convention says that second double is in r2 and r3. | 1897 // Calling convention says that second double is in r2 and r3. |
| 1895 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 1898 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 1896 } | 1899 } |
| 1897 __ jmp(&finished_loading_r0); | 1900 __ jmp(&finished_loading_r0); |
| 1898 __ bind(&r0_is_smi); | 1901 __ bind(&r0_is_smi); |
| 1899 if (mode_ == OVERWRITE_RIGHT) { | 1902 if (mode_ == OVERWRITE_RIGHT) { |
| 1900 // We can't overwrite a Smi so get address of new heap number into r5. | 1903 // We can't overwrite a Smi so get address of new heap number into r5. |
| 1901 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 1904 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 1902 } | 1905 } |
| 1903 | 1906 |
| 1904 if (CpuFeatures::IsSupported(VFP3)) { | 1907 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1905 CpuFeatures::Scope scope(VFP3); | 1908 CpuFeatures::Scope scope(VFP3); |
| 1906 // Convert smi in r0 to double in d7. | 1909 // Convert smi in r0 to double in d7. |
| 1907 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 1910 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
| 1908 __ vmov(s15, r7); | 1911 __ vmov(s15, r7); |
| 1909 __ vcvt_f64_s32(d7, s15); | 1912 __ vcvt_f64_s32(d7, s15); |
| 1910 if (!use_fp_registers) { | 1913 if (!use_fp_registers) { |
| 1911 __ vmov(r2, r3, d7); | 1914 __ vmov(r2, r3, d7); |
| 1912 } | 1915 } |
| 1913 } else { | 1916 } else { |
| 1914 // Write Smi from r0 to r3 and r2 in double format. | 1917 // Write Smi from r0 to r3 and r2 in double format. |
| (...skipping 36 matching lines...) |
| 1951 // Calling convention says that first double is in r0 and r1. | 1954 // Calling convention says that first double is in r0 and r1. |
| 1952 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 1955 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 1953 } | 1956 } |
| 1954 __ jmp(&finished_loading_r1); | 1957 __ jmp(&finished_loading_r1); |
| 1955 __ bind(&r1_is_smi); | 1958 __ bind(&r1_is_smi); |
| 1956 if (mode_ == OVERWRITE_LEFT) { | 1959 if (mode_ == OVERWRITE_LEFT) { |
| 1957 // We can't overwrite a Smi so get address of new heap number into r5. | 1960 // We can't overwrite a Smi so get address of new heap number into r5. |
| 1958 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 1961 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 1959 } | 1962 } |
| 1960 | 1963 |
| 1961 if (CpuFeatures::IsSupported(VFP3)) { | 1964 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 1962 CpuFeatures::Scope scope(VFP3); | 1965 CpuFeatures::Scope scope(VFP3); |
| 1963 // Convert smi in r1 to double in d6. | 1966 // Convert smi in r1 to double in d6. |
| 1964 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 1967 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
| 1965 __ vmov(s13, r7); | 1968 __ vmov(s13, r7); |
| 1966 __ vcvt_f64_s32(d6, s13); | 1969 __ vcvt_f64_s32(d6, s13); |
| 1967 if (!use_fp_registers) { | 1970 if (!use_fp_registers) { |
| 1968 __ vmov(r0, r1, d6); | 1971 __ vmov(r0, r1, d6); |
| 1969 } | 1972 } |
| 1970 } else { | 1973 } else { |
| 1971 // Write Smi from r1 to r1 and r0 in double format. | 1974 // Write Smi from r1 to r1 and r0 in double format. |
| (...skipping 191 matching lines...) |
| 2163 __ mov(r2, Operand(r3, ASR, r2)); | 2166 __ mov(r2, Operand(r3, ASR, r2)); |
| 2164 break; | 2167 break; |
| 2165 case Token::SHR: | 2168 case Token::SHR: |
| 2166 // Use only the 5 least significant bits of the shift count. | 2169 // Use only the 5 least significant bits of the shift count. |
| 2167 __ and_(r2, r2, Operand(0x1f)); | 2170 __ and_(r2, r2, Operand(0x1f)); |
| 2168 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2171 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 2169 // SHR is special because it is required to produce a positive answer. | 2172 // SHR is special because it is required to produce a positive answer. |
| 2170 // The code below for writing into heap numbers isn't capable of writing | 2173 // The code below for writing into heap numbers isn't capable of writing |
| 2171 // the register as an unsigned int so we go to slow case if we hit this | 2174 // the register as an unsigned int so we go to slow case if we hit this |
| 2172 // case. | 2175 // case. |
| 2173 if (CpuFeatures::IsSupported(VFP3)) { | 2176 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 2174 __ b(mi, &result_not_a_smi); | 2177 __ b(mi, &result_not_a_smi); |
| 2175 } else { | 2178 } else { |
| 2176 __ b(mi, &slow); | 2179 __ b(mi, &slow); |
| 2177 } | 2180 } |
| 2178 break; | 2181 break; |
| 2179 case Token::SHL: | 2182 case Token::SHL: |
| 2180 // Use only the 5 least significant bits of the shift count. | 2183 // Use only the 5 least significant bits of the shift count. |
| 2181 __ and_(r2, r2, Operand(0x1f)); | 2184 __ and_(r2, r2, Operand(0x1f)); |
| 2182 __ mov(r2, Operand(r3, LSL, r2)); | 2185 __ mov(r2, Operand(r3, LSL, r2)); |
| 2183 break; | 2186 break; |
| (...skipping 27 matching lines...) |
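The SHR special case called out above exists because JavaScript's unsigned right shift can produce values above the signed 32-bit range, which fit neither in a smi nor in the signed int32-to-heap-number path. For example (illustrative):

    // JS semantics of 'value >>> shift' on 32-bit operands.
    uint32_t UnsignedShiftRight(int32_t value, int32_t shift) {
      return static_cast<uint32_t>(value) >> (shift & 0x1f);
    }
    // UnsignedShiftRight(-1, 0) == 4294967295u, which exceeds INT32_MAX, so the
    // result must be boxed as a heap number (VFP path) or handled in the slow case.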
| 2211 default: break; | 2214 default: break; |
| 2212 } | 2215 } |
| 2213 __ bind(&got_a_heap_number); | 2216 __ bind(&got_a_heap_number); |
| 2214 // r2: Answer as signed int32. | 2217 // r2: Answer as signed int32. |
| 2215 // r5: Heap number to write answer into. | 2218 // r5: Heap number to write answer into. |
| 2216 | 2219 |
| 2217 // Nothing can go wrong now, so move the heap number to r0, which is the | 2220 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 2218 // result. | 2221 // result. |
| 2219 __ mov(r0, Operand(r5)); | 2222 __ mov(r0, Operand(r5)); |
| 2220 | 2223 |
| 2221 if (CpuFeatures::IsSupported(VFP3)) { | 2224 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 2222 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. | 2225 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. |
| 2223 CpuFeatures::Scope scope(VFP3); | 2226 CpuFeatures::Scope scope(VFP3); |
| 2224 __ vmov(s0, r2); | 2227 __ vmov(s0, r2); |
| 2225 if (op_ == Token::SHR) { | 2228 if (op_ == Token::SHR) { |
| 2226 __ vcvt_f64_u32(d0, s0); | 2229 __ vcvt_f64_u32(d0, s0); |
| 2227 } else { | 2230 } else { |
| 2228 __ vcvt_f64_s32(d0, s0); | 2231 __ vcvt_f64_s32(d0, s0); |
| 2229 } | 2232 } |
| 2230 __ sub(r3, r0, Operand(kHeapObjectTag)); | 2233 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 2231 __ vstr(d0, r3, HeapNumber::kValueOffset); | 2234 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| (...skipping 652 matching lines...) |
| 2884 break; | 2887 break; |
| 2885 default: | 2888 default: |
| 2886 UNREACHABLE(); | 2889 UNREACHABLE(); |
| 2887 } | 2890 } |
| 2888 } | 2891 } |
| 2889 | 2892 |
| 2890 | 2893 |
| 2891 const char* TypeRecordingBinaryOpStub::GetName() { | 2894 const char* TypeRecordingBinaryOpStub::GetName() { |
| 2892 if (name_ != NULL) return name_; | 2895 if (name_ != NULL) return name_; |
| 2893 const int kMaxNameLength = 100; | 2896 const int kMaxNameLength = 100; |
| 2894 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | 2897 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| | 2898 kMaxNameLength); |
| 2895 if (name_ == NULL) return "OOM"; | 2899 if (name_ == NULL) return "OOM"; |
| 2896 const char* op_name = Token::Name(op_); | 2900 const char* op_name = Token::Name(op_); |
| 2897 const char* overwrite_name; | 2901 const char* overwrite_name; |
| 2898 switch (mode_) { | 2902 switch (mode_) { |
| 2899 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | 2903 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 2900 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | 2904 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
| 2901 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | 2905 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| 2902 default: overwrite_name = "UnknownOverwrite"; break; | 2906 default: overwrite_name = "UnknownOverwrite"; break; |
| 2903 } | 2907 } |
| 2904 | 2908 |
| (...skipping 153 matching lines...) |
| 3058 | 3062 |
| 3059 switch (op_) { | 3063 switch (op_) { |
| 3060 case Token::ADD: | 3064 case Token::ADD: |
| 3061 case Token::SUB: | 3065 case Token::SUB: |
| 3062 case Token::MUL: | 3066 case Token::MUL: |
| 3063 case Token::DIV: | 3067 case Token::DIV: |
| 3064 case Token::MOD: { | 3068 case Token::MOD: { |
| 3065 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 3069 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
| 3066 // depending on whether VFP3 is available or not. | 3070 // depending on whether VFP3 is available or not. |
| 3067 FloatingPointHelper::Destination destination = | 3071 FloatingPointHelper::Destination destination = |
| 3068 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | 3072 Isolate::Current()->cpu_features()->IsSupported(VFP3) && |
| | 3073 op_ != Token::MOD ? |
| 3069 FloatingPointHelper::kVFPRegisters : | 3074 FloatingPointHelper::kVFPRegisters : |
| 3070 FloatingPointHelper::kCoreRegisters; | 3075 FloatingPointHelper::kCoreRegisters; |
| 3071 | 3076 |
| 3072 // Allocate new heap number for result. | 3077 // Allocate new heap number for result. |
| 3073 Register result = r5; | 3078 Register result = r5; |
| 3074 GenerateHeapResultAllocation( | 3079 GenerateHeapResultAllocation( |
| 3075 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 3080 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 3076 | 3081 |
| 3077 // Load the operands. | 3082 // Load the operands. |
| 3078 if (smi_operands) { | 3083 if (smi_operands) { |
| (...skipping 91 matching lines...) |
| 3170 __ mov(r2, Operand(r3, ASR, r2)); | 3175 __ mov(r2, Operand(r3, ASR, r2)); |
| 3171 break; | 3176 break; |
| 3172 case Token::SHR: | 3177 case Token::SHR: |
| 3173 // Use only the 5 least significant bits of the shift count. | 3178 // Use only the 5 least significant bits of the shift count. |
| 3174 __ GetLeastBitsFromInt32(r2, r2, 5); | 3179 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 3175 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 3180 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 3176 // SHR is special because it is required to produce a positive answer. | 3181 // SHR is special because it is required to produce a positive answer. |
| 3177 // The code below for writing into heap numbers isn't capable of | 3182 // The code below for writing into heap numbers isn't capable of |
| 3178 // writing the register as an unsigned int so we go to slow case if we | 3183 // writing the register as an unsigned int so we go to slow case if we |
| 3179 // hit this case. | 3184 // hit this case. |
| 3180 if (CpuFeatures::IsSupported(VFP3)) { | 3185 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 3181 __ b(mi, &result_not_a_smi); | 3186 __ b(mi, &result_not_a_smi); |
| 3182 } else { | 3187 } else { |
| 3183 __ b(mi, not_numbers); | 3188 __ b(mi, not_numbers); |
| 3184 } | 3189 } |
| 3185 break; | 3190 break; |
| 3186 case Token::SHL: | 3191 case Token::SHL: |
| 3187 // Use only the 5 least significant bits of the shift count. | 3192 // Use only the 5 least significant bits of the shift count. |
| 3188 __ GetLeastBitsFromInt32(r2, r2, 5); | 3193 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 3189 __ mov(r2, Operand(r3, LSL, r2)); | 3194 __ mov(r2, Operand(r3, LSL, r2)); |
| 3190 break; | 3195 break; |
| (...skipping 18 matching lines...) |
| 3209 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 3214 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 3210 } | 3215 } |
| 3211 | 3216 |
| 3212 // r2: Answer as signed int32. | 3217 // r2: Answer as signed int32. |
| 3213 // r5: Heap number to write answer into. | 3218 // r5: Heap number to write answer into. |
| 3214 | 3219 |
| 3215 // Nothing can go wrong now, so move the heap number to r0, which is the | 3220 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 3216 // result. | 3221 // result. |
| 3217 __ mov(r0, Operand(r5)); | 3222 __ mov(r0, Operand(r5)); |
| 3218 | 3223 |
| 3219 if (CpuFeatures::IsSupported(VFP3)) { | 3224 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 3220 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 3225 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 3221 // mentioned above SHR needs to always produce a positive result. | 3226 // mentioned above SHR needs to always produce a positive result. |
| 3222 CpuFeatures::Scope scope(VFP3); | 3227 CpuFeatures::Scope scope(VFP3); |
| 3223 __ vmov(s0, r2); | 3228 __ vmov(s0, r2); |
| 3224 if (op_ == Token::SHR) { | 3229 if (op_ == Token::SHR) { |
| 3225 __ vcvt_f64_u32(d0, s0); | 3230 __ vcvt_f64_u32(d0, s0); |
| 3226 } else { | 3231 } else { |
| 3227 __ vcvt_f64_s32(d0, s0); | 3232 __ vcvt_f64_s32(d0, s0); |
| 3228 } | 3233 } |
| 3229 __ sub(r3, r0, Operand(kHeapObjectTag)); | 3234 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| (...skipping 108 matching lines...) |
| 3338 switch (op_) { | 3343 switch (op_) { |
| 3339 case Token::ADD: | 3344 case Token::ADD: |
| 3340 case Token::SUB: | 3345 case Token::SUB: |
| 3341 case Token::MUL: | 3346 case Token::MUL: |
| 3342 case Token::DIV: | 3347 case Token::DIV: |
| 3343 case Token::MOD: { | 3348 case Token::MOD: { |
| 3344 // Load both operands and check that they are 32-bit integer. | 3349 // Load both operands and check that they are 32-bit integer. |
| 3345 // Jump to type transition if they are not. The registers r0 and r1 (right | 3350 // Jump to type transition if they are not. The registers r0 and r1 (right |
| 3346 // and left) are preserved for the runtime call. | 3351 // and left) are preserved for the runtime call. |
| 3347 FloatingPointHelper::Destination destination = | 3352 FloatingPointHelper::Destination destination = |
| 3348 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | 3353 Isolate::Current()->cpu_features()->IsSupported(VFP3) && |
| | 3354 op_ != Token::MOD ? |
| 3349 FloatingPointHelper::kVFPRegisters : | 3355 FloatingPointHelper::kVFPRegisters : |
| 3350 FloatingPointHelper::kCoreRegisters; | 3356 FloatingPointHelper::kCoreRegisters; |
| 3351 | 3357 |
| 3352 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 3358 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 3353 right, | 3359 right, |
| 3354 destination, | 3360 destination, |
| 3355 d7, | 3361 d7, |
| 3356 r2, | 3362 r2, |
| 3357 r3, | 3363 r3, |
| 3358 heap_number_map, | 3364 heap_number_map, |
| (...skipping 154 matching lines...) |
| 3513 break; | 3519 break; |
| 3514 case Token::SHR: | 3520 case Token::SHR: |
| 3515 __ and_(r2, r2, Operand(0x1f)); | 3521 __ and_(r2, r2, Operand(0x1f)); |
| 3516 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 3522 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 3517 // SHR is special because it is required to produce a positive answer. | 3523 // SHR is special because it is required to produce a positive answer. |
| 3518 // We only get a negative result if the shift value (r2) is 0. | 3524 // We only get a negative result if the shift value (r2) is 0. |
| 3519 // This result cannot be represented as a signed 32-bit integer, try | 3525 // This result cannot be represented as a signed 32-bit integer, try |
| 3520 // to return a heap number if we can. | 3526 // to return a heap number if we can. |
| 3521 // The non vfp3 code does not support this special case, so jump to | 3527 // The non vfp3 code does not support this special case, so jump to |
| 3522 // runtime if we don't support it. | 3528 // runtime if we don't support it. |
| 3523 if (CpuFeatures::IsSupported(VFP3)) { | 3529 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 3524 __ b(mi, | 3530 __ b(mi, |
| 3525 (result_type_ <= TRBinaryOpIC::INT32) ? &transition | 3531 (result_type_ <= TRBinaryOpIC::INT32) ? &transition |
| 3526 : &return_heap_number); | 3532 : &return_heap_number); |
| 3527 } else { | 3533 } else { |
| 3528 __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition | 3534 __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition |
| 3529 : &call_runtime); | 3535 : &call_runtime); |
| 3530 } | 3536 } |
| 3531 break; | 3537 break; |
| 3532 case Token::SHL: | 3538 case Token::SHL: |
| 3533 __ and_(r2, r2, Operand(0x1f)); | 3539 __ and_(r2, r2, Operand(0x1f)); |
| 3534 __ mov(r2, Operand(r3, LSL, r2)); | 3540 __ mov(r2, Operand(r3, LSL, r2)); |
| 3535 break; | 3541 break; |
| 3536 default: | 3542 default: |
| 3537 UNREACHABLE(); | 3543 UNREACHABLE(); |
| 3538 } | 3544 } |
| 3539 | 3545 |
| 3540 // Check if the result fits in a smi. | 3546 // Check if the result fits in a smi. |
| 3541 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 3547 __ add(scratch1, r2, Operand(0x40000000), SetCC); |
| 3542 // If not try to return a heap number. (We know the result is an int32.) | 3548 // If not try to return a heap number. (We know the result is an int32.) |
| 3543 __ b(mi, &return_heap_number); | 3549 __ b(mi, &return_heap_number); |
| 3544 // Tag the result and return. | 3550 // Tag the result and return. |
| 3545 __ SmiTag(r0, r2); | 3551 __ SmiTag(r0, r2); |
| 3546 __ Ret(); | 3552 __ Ret(); |
| 3547 | 3553 |
| 3548 __ bind(&return_heap_number); | 3554 __ bind(&return_heap_number); |
| 3549 if (CpuFeatures::IsSupported(VFP3)) { | 3555 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 3550 CpuFeatures::Scope scope(VFP3); | 3556 CpuFeatures::Scope scope(VFP3); |
| 3551 heap_number_result = r5; | 3557 heap_number_result = r5; |
| 3552 GenerateHeapResultAllocation(masm, | 3558 GenerateHeapResultAllocation(masm, |
| 3553 heap_number_result, | 3559 heap_number_result, |
| 3554 heap_number_map, | 3560 heap_number_map, |
| 3555 scratch1, | 3561 scratch1, |
| 3556 scratch2, | 3562 scratch2, |
| 3557 &call_runtime); | 3563 &call_runtime); |
| 3558 | 3564 |
| 3559 if (op_ != Token::SHR) { | 3565 if (op_ != Token::SHR) { |
| (...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3743 | 3749 |
| 3744 Label input_not_smi; | 3750 Label input_not_smi; |
| 3745 Label loaded; | 3751 Label loaded; |
| 3746 Label calculate; | 3752 Label calculate; |
| 3747 Label invalid_cache; | 3753 Label invalid_cache; |
| 3748 const Register scratch0 = r9; | 3754 const Register scratch0 = r9; |
| 3749 const Register scratch1 = r7; | 3755 const Register scratch1 = r7; |
| 3750 const Register cache_entry = r0; | 3756 const Register cache_entry = r0; |
| 3751 const bool tagged = (argument_type_ == TAGGED); | 3757 const bool tagged = (argument_type_ == TAGGED); |
| 3752 | 3758 |
| 3753 if (CpuFeatures::IsSupported(VFP3)) { | 3759 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 3754 CpuFeatures::Scope scope(VFP3); | 3760 CpuFeatures::Scope scope(VFP3); |
| 3755 if (tagged) { | 3761 if (tagged) { |
| 3756 // Argument is a number and is on stack and in r0. | 3762 // Argument is a number and is on stack and in r0. |
| 3757 // Load argument and check if it is a smi. | 3763 // Load argument and check if it is a smi. |
| 3758 __ JumpIfNotSmi(r0, &input_not_smi); | 3764 __ JumpIfNotSmi(r0, &input_not_smi); |
| 3759 | 3765 |
| 3760 // Input is a smi. Convert to double and load the low and high words | 3766 // Input is a smi. Convert to double and load the low and high words |
| 3761 // of the double into r2, r3. | 3767 // of the double into r2, r3. |
| 3762 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 3768 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
| 3763 __ b(&loaded); | 3769 __ b(&loaded); |
| (...skipping 14 matching lines...) |
| 3778 __ vmov(r2, r3, d2); | 3784 __ vmov(r2, r3, d2); |
| 3779 } | 3785 } |
| 3780 __ bind(&loaded); | 3786 __ bind(&loaded); |
| 3781 // r2 = low 32 bits of double value | 3787 // r2 = low 32 bits of double value |
| 3782 // r3 = high 32 bits of double value | 3788 // r3 = high 32 bits of double value |
| 3783 // Compute hash (the shifts are arithmetic): | 3789 // Compute hash (the shifts are arithmetic): |
| 3784 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 3790 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| 3785 __ eor(r1, r2, Operand(r3)); | 3791 __ eor(r1, r2, Operand(r3)); |
| 3786 __ eor(r1, r1, Operand(r1, ASR, 16)); | 3792 __ eor(r1, r1, Operand(r1, ASR, 16)); |
| 3787 __ eor(r1, r1, Operand(r1, ASR, 8)); | 3793 __ eor(r1, r1, Operand(r1, ASR, 8)); |
| 3788 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); | 3794 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
| 3789 __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); | 3795 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
| 3790 | 3796 |
| 3791 // r2 = low 32 bits of double value. | 3797 // r2 = low 32 bits of double value. |
| 3792 // r3 = high 32 bits of double value. | 3798 // r3 = high 32 bits of double value. |
| 3793 // r1 = TranscendentalCache::hash(double value). | 3799 // r1 = TranscendentalCache::hash(double value). |
| 3794 __ mov(cache_entry, | 3800 __ mov(cache_entry, |
| 3795 Operand(ExternalReference::transcendental_cache_array_address())); | 3801 Operand(ExternalReference::transcendental_cache_array_address())); |
| 3796 // r0 points to cache array. | 3802 // r0 points to cache array. |
| 3797 __ ldr(cache_entry, MemOperand(cache_entry, | 3803 __ ldr(cache_entry, MemOperand(cache_entry, type_ * sizeof( |
| 3798 type_ * sizeof(TranscendentalCache::caches_[0]))); | 3804 Isolate::Current()->transcendental_cache()->caches_[0]))); |
| 3799 // r0 points to the cache for the type type_. | 3805 // r0 points to the cache for the type type_. |
| 3800 // If NULL, the cache hasn't been initialized yet, so go through runtime. | 3806 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| 3801 __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); | 3807 __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); |
| 3802 __ b(eq, &invalid_cache); | 3808 __ b(eq, &invalid_cache); |
| 3803 | 3809 |
| 3804 #ifdef DEBUG | 3810 #ifdef DEBUG |
| 3805 // Check that the layout of cache elements match expectations. | 3811 // Check that the layout of cache elements match expectations. |
| 3806 { TranscendentalCache::Element test_elem[2]; | 3812 { TranscendentalCache::SubCache::Element test_elem[2]; |
| 3807 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | 3813 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
| 3808 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | 3814 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
| 3809 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | 3815 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
| 3810 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 3816 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
| 3811 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 3817 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
| 3812 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. | 3818 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. |
| 3813 CHECK_EQ(0, elem_in0 - elem_start); | 3819 CHECK_EQ(0, elem_in0 - elem_start); |
| 3814 CHECK_EQ(kIntSize, elem_in1 - elem_start); | 3820 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
| 3815 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 3821 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
| 3816 } | 3822 } |
| (...skipping 11 matching lines...) |
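Two details of the hunk above are easier to read outside assembler form: the cache-index hash emitted at new lines 3791-3795, and the element layout asserted by the DEBUG block (two uint32 inputs followed by a pointer, 12 bytes apart on 32-bit ARM). A sketch, with the struct and helper names assumed rather than taken from this file:

    // Hash used to index the transcendental cache; the shifts are arithmetic,
    // matching the ASR operands in the emitted code.
    uint32_t TranscendentalCacheIndex(uint32_t lo, uint32_t hi, uint32_t cache_size) {
      uint32_t h = lo ^ hi;
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
      return h & (cache_size - 1);  // cache_size must be a power of two
    }

    // Layout implied by the CHECK_EQs in the DEBUG block above.
    struct Element {
      uint32_t in[2];   // the two words of the input double
      void* output;     // pointer to the cached result object
    };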
| 3828 // Cache hit. Load result, cleanup and return. | 3834 // Cache hit. Load result, cleanup and return. |
| 3829 if (tagged) { | 3835 if (tagged) { |
| 3830 // Pop input value from stack and load result into r0. | 3836 // Pop input value from stack and load result into r0. |
| 3831 __ pop(); | 3837 __ pop(); |
| 3832 __ mov(r0, Operand(r6)); | 3838 __ mov(r0, Operand(r6)); |
| 3833 } else { | 3839 } else { |
| 3834 // Load result into d2. | 3840 // Load result into d2. |
| 3835 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | 3841 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
| 3836 } | 3842 } |
| 3837 __ Ret(); | 3843 __ Ret(); |
| 3838 } // if (CpuFeatures::IsSupported(VFP3)) | 3844 } // if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) |
| 3839 | 3845 |
| 3840 __ bind(&calculate); | 3846 __ bind(&calculate); |
| 3841 if (tagged) { | 3847 if (tagged) { |
| 3842 __ bind(&invalid_cache); | 3848 __ bind(&invalid_cache); |
| 3843 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); | 3849 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); |
| 3844 } else { | 3850 } else { |
| 3845 if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE(); | 3851 if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE(); |
| 3846 CpuFeatures::Scope scope(VFP3); | 3852 CpuFeatures::Scope scope(VFP3); |
| 3847 | 3853 |
| 3848 Label no_update; | 3854 Label no_update; |
| 3849 Label skip_cache; | 3855 Label skip_cache; |
| 3850 const Register heap_number_map = r5; | 3856 const Register heap_number_map = r5; |
| 3851 | 3857 |
| 3852 // Call C function to calculate the result and update the cache. | 3858 // Call C function to calculate the result and update the cache. |
| 3853 // Register r0 holds precalculated cache entry address; preserve | 3859 // Register r0 holds precalculated cache entry address; preserve |
| 3854 // it on the stack and pop it into register cache_entry after the | 3860 // it on the stack and pop it into register cache_entry after the |
| 3855 // call. | 3861 // call. |
| (...skipping 176 matching lines...) |
| 4032 | 4038 |
| 4033 __ bind(&try_float); | 4039 __ bind(&try_float); |
| 4034 if (!overwrite_ == UNARY_OVERWRITE) { | 4040 if (!overwrite_ == UNARY_OVERWRITE) { |
| 4035 // Allocate a fresh heap number, but don't overwrite r0 until | 4041 // Allocate a fresh heap number, but don't overwrite r0 until |
| 4036 // we're sure we can do it without going through the slow case | 4042 // we're sure we can do it without going through the slow case |
| 4037 // that needs the value in r0. | 4043 // that needs the value in r0. |
| 4038 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | 4044 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 4039 __ mov(r0, Operand(r2)); | 4045 __ mov(r0, Operand(r2)); |
| 4040 } | 4046 } |
| 4041 | 4047 |
| 4042 if (CpuFeatures::IsSupported(VFP3)) { | 4048 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 4043 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 4049 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. |
| 4044 CpuFeatures::Scope scope(VFP3); | 4050 CpuFeatures::Scope scope(VFP3); |
| 4045 __ vmov(s0, r1); | 4051 __ vmov(s0, r1); |
| 4046 __ vcvt_f64_s32(d0, s0); | 4052 __ vcvt_f64_s32(d0, s0); |
| 4047 __ sub(r2, r0, Operand(kHeapObjectTag)); | 4053 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 4048 __ vstr(d0, r2, HeapNumber::kValueOffset); | 4054 __ vstr(d0, r2, HeapNumber::kValueOffset); |
| 4049 } else { | 4055 } else { |
| 4050 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 4056 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not |
| 4051 // have to set up a frame. | 4057 // have to set up a frame. |
| 4052 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | 4058 WriteInt32ToHeapNumberStub stub(r1, r0, r2); |
| (...skipping 20 matching lines...) |
| 4073 break; | 4079 break; |
| 4074 default: | 4080 default: |
| 4075 UNREACHABLE(); | 4081 UNREACHABLE(); |
| 4076 } | 4082 } |
| 4077 } | 4083 } |
| 4078 | 4084 |
| 4079 | 4085 |
| 4080 void MathPowStub::Generate(MacroAssembler* masm) { | 4086 void MathPowStub::Generate(MacroAssembler* masm) { |
| 4081 Label call_runtime; | 4087 Label call_runtime; |
| 4082 | 4088 |
| 4083 if (CpuFeatures::IsSupported(VFP3)) { | 4089 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 4084 CpuFeatures::Scope scope(VFP3); | 4090 CpuFeatures::Scope scope(VFP3); |
| 4085 | 4091 |
| 4086 Label base_not_smi; | 4092 Label base_not_smi; |
| 4087 Label exponent_not_smi; | 4093 Label exponent_not_smi; |
| 4088 Label convert_exponent; | 4094 Label convert_exponent; |
| 4089 | 4095 |
| 4090 const Register base = r0; | 4096 const Register base = r0; |
| 4091 const Register exponent = r1; | 4097 const Register exponent = r1; |
| 4092 const Register heapnumbermap = r5; | 4098 const Register heapnumbermap = r5; |
| 4093 const Register heapnumber = r6; | 4099 const Register heapnumber = r6; |
| (...skipping 139 matching lines...) |
| 4233 ASSERT(IsPowerOf2(frame_alignment)); | 4239 ASSERT(IsPowerOf2(frame_alignment)); |
| 4234 __ tst(sp, Operand(frame_alignment_mask)); | 4240 __ tst(sp, Operand(frame_alignment_mask)); |
| 4235 __ b(eq, &alignment_as_expected); | 4241 __ b(eq, &alignment_as_expected); |
| 4236 // Don't use Check here, as it will call Runtime_Abort re-entering here. | 4242 // Don't use Check here, as it will call Runtime_Abort re-entering here. |
| 4237 __ stop("Unexpected alignment"); | 4243 __ stop("Unexpected alignment"); |
| 4238 __ bind(&alignment_as_expected); | 4244 __ bind(&alignment_as_expected); |
| 4239 } | 4245 } |
| 4240 } | 4246 } |
| 4241 #endif | 4247 #endif |
| 4242 | 4248 |
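Editor's note: the debug-only block above checks stack alignment with a single tst against a power-of-two mask. The same test in C++ terms (a sketch; the actual frame alignment is target dependent, 8 bytes is only an assumption here):

    #include <stdint.h>
    // Returns true when sp satisfies the required frame alignment.
    bool StackIsAligned(uintptr_t sp, uintptr_t frame_alignment /* power of two */) {
      uintptr_t frame_alignment_mask = frame_alignment - 1;  // e.g. 8 -> 0x7
      return (sp & frame_alignment_mask) == 0;               // tst sp, #mask ; beq
    }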
| 4249 __ mov(r2, Operand(ExternalReference::isolate_address())); |
| 4250 |
| 4251 |
| 4243 // TODO(1242173): To let the GC traverse the return address of the exit | 4252 // TODO(1242173): To let the GC traverse the return address of the exit |
| 4244 // frames, we need to know where the return address is. Right now, | 4253 // frames, we need to know where the return address is. Right now, |
| 4245 // we store it on the stack to be able to find it again, but we never | 4254 // we store it on the stack to be able to find it again, but we never |
| 4246 // restore from it in case of changes, which makes it impossible to | 4255 // restore from it in case of changes, which makes it impossible to |
| 4247 // support moving the C entry code stub. This should be fixed, but currently | 4256 // support moving the C entry code stub. This should be fixed, but currently |
| 4248 // this is OK because the CEntryStub gets generated so early in the V8 boot | 4257 // this is OK because the CEntryStub gets generated so early in the V8 boot |
| 4249 // sequence that it is not moving ever. | 4258 // sequence that it is not moving ever. |
| 4250 | 4259 |
| 4251 // Compute the return address in lr to return to after the jump below. Pc is | 4260 // Compute the return address in lr to return to after the jump below. Pc is |
| 4252 // already at '+ 8' from the current instruction but return is after three | 4261 // already at '+ 8' from the current instruction but return is after three |
| (...skipping 35 matching lines...) |
| 4288 __ b(eq, &retry); | 4297 __ b(eq, &retry); |
| 4289 | 4298 |
| 4290 // Special handling of out of memory exceptions. | 4299 // Special handling of out of memory exceptions. |
| 4291 Failure* out_of_memory = Failure::OutOfMemoryException(); | 4300 Failure* out_of_memory = Failure::OutOfMemoryException(); |
| 4292 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | 4301 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); |
| 4293 __ b(eq, throw_out_of_memory_exception); | 4302 __ b(eq, throw_out_of_memory_exception); |
| 4294 | 4303 |
| 4295 // Retrieve the pending exception and clear the variable. | 4304 // Retrieve the pending exception and clear the variable. |
| 4296 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | 4305 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); |
| 4297 __ ldr(r3, MemOperand(ip)); | 4306 __ ldr(r3, MemOperand(ip)); |
| 4298 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 4307 __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address))); |
| 4299 __ ldr(r0, MemOperand(ip)); | 4308 __ ldr(r0, MemOperand(ip)); |
| 4300 __ str(r3, MemOperand(ip)); | 4309 __ str(r3, MemOperand(ip)); |
| 4301 | 4310 |
| 4302 // Special handling of termination exceptions which are uncatchable | 4311 // Special handling of termination exceptions which are uncatchable |
| 4303 // by javascript code. | 4312 // by javascript code. |
| 4304 __ cmp(r0, Operand(Factory::termination_exception())); | 4313 __ cmp(r0, Operand(FACTORY->termination_exception())); |
| 4305 __ b(eq, throw_termination_exception); | 4314 __ b(eq, throw_termination_exception); |
| 4306 | 4315 |
| 4307 // Handle normal exception. | 4316 // Handle normal exception. |
| 4308 __ jmp(throw_normal_exception); | 4317 __ jmp(throw_normal_exception); |
| 4309 | 4318 |
| 4310 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying | 4319 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying |
| 4311 } | 4320 } |
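Editor's note: after the C call returns a failure, the code above first branches back to &retry, then checks for the out-of-memory sentinel, and only then swaps the hole value into the pending-exception slot and dispatches on termination vs. normal exceptions. A rough C++ rendering of that ordering with stand-in names (the retry comparison itself is in the lines elided above; the stub does all of this with raw loads, stores and compares):

    enum Outcome { RETRY, THROW_OOM, THROW_TERMINATION, THROW_NORMAL };

    // pending_exception_slot is the k_pending_exception_address cell, hole is the
    // value behind ExternalReference::the_hole_value_location().
    Outcome ClassifyFailure(void* result, void* oom_sentinel, void* termination,
                            void** pending_exception_slot, void* hole,
                            bool retry_after_gc) {
      if (retry_after_gc) return RETRY;              // b eq, &retry
      if (result == oom_sentinel) return THROW_OOM;  // uncatchable
      void* exception = *pending_exception_slot;     // ldr r0, [ip]
      *pending_exception_slot = hole;                // str r3, [ip]  (clear)
      if (exception == termination) return THROW_TERMINATION;  // uncatchable
      return THROW_NORMAL;
    }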
| 4312 | 4321 |
| 4313 | 4322 |
| 4314 void CEntryStub::Generate(MacroAssembler* masm) { | 4323 void CEntryStub::Generate(MacroAssembler* masm) { |
| (...skipping 91 matching lines...) |
| 4406 // Push a frame with special values setup to mark it as an entry frame. | 4415 // Push a frame with special values setup to mark it as an entry frame. |
| 4407 // r0: code entry | 4416 // r0: code entry |
| 4408 // r1: function | 4417 // r1: function |
| 4409 // r2: receiver | 4418 // r2: receiver |
| 4410 // r3: argc | 4419 // r3: argc |
| 4411 // r4: argv | 4420 // r4: argv |
| 4412 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 4421 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| 4413 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 4422 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
| 4414 __ mov(r7, Operand(Smi::FromInt(marker))); | 4423 __ mov(r7, Operand(Smi::FromInt(marker))); |
| 4415 __ mov(r6, Operand(Smi::FromInt(marker))); | 4424 __ mov(r6, Operand(Smi::FromInt(marker))); |
| 4416 __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); | 4425 __ mov(r5, Operand(ExternalReference(Isolate::k_c_entry_fp_address))); |
| 4417 __ ldr(r5, MemOperand(r5)); | 4426 __ ldr(r5, MemOperand(r5)); |
| 4418 __ Push(r8, r7, r6, r5); | 4427 __ Push(r8, r7, r6, r5); |
| 4419 | 4428 |
| 4420 // Setup frame pointer for the frame to be pushed. | 4429 // Setup frame pointer for the frame to be pushed. |
| 4421 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 4430 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
| 4422 | 4431 |
| 4423 #ifdef ENABLE_LOGGING_AND_PROFILING | 4432 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 4424 // If this is the outermost JS call, set js_entry_sp value. | 4433 // If this is the outermost JS call, set js_entry_sp value. |
| 4425 ExternalReference js_entry_sp(Top::k_js_entry_sp_address); | 4434 ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address); |
| 4426 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 4435 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
| 4427 __ ldr(r6, MemOperand(r5)); | 4436 __ ldr(r6, MemOperand(r5)); |
| 4428 __ cmp(r6, Operand(0, RelocInfo::NONE)); | 4437 __ cmp(r6, Operand(0, RelocInfo::NONE)); |
| 4429 __ str(fp, MemOperand(r5), eq); | 4438 __ str(fp, MemOperand(r5), eq); |
| 4430 #endif | 4439 #endif |
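Editor's note: the ENABLE_LOGGING_AND_PROFILING block records the frame pointer of the outermost JS entry: it stores fp into js_entry_sp only when the slot is still zero, and the matching block near the end of the stub clears the slot again when the departing frame's fp equals the recorded value. In plain C++ terms (a sketch; js_entry_sp is really an external-reference cell):

    #include <stdint.h>
    void OnJsEntry(uintptr_t* js_entry_sp, uintptr_t fp) {
      if (*js_entry_sp == 0) *js_entry_sp = fp;  // outermost JS call begins
    }
    void OnJsExit(uintptr_t* js_entry_sp, uintptr_t fp) {
      if (fp == *js_entry_sp) *js_entry_sp = 0;  // outermost JS call ends
    }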
| 4431 | 4440 |
| 4432 // Call a faked try-block that does the invoke. | 4441 // Call a faked try-block that does the invoke. |
| 4433 __ bl(&invoke); | 4442 __ bl(&invoke); |
| 4434 | 4443 |
| 4435 // Caught exception: Store result (exception) in the pending | 4444 // Caught exception: Store result (exception) in the pending |
| 4436 // exception field in the JSEnv and return a failure sentinel. | 4445 // exception field in the JSEnv and return a failure sentinel. |
| 4437 // Coming in here the fp will be invalid because the PushTryHandler below | 4446 // Coming in here the fp will be invalid because the PushTryHandler below |
| 4438 // sets it to 0 to signal the existence of the JSEntry frame. | 4447 // sets it to 0 to signal the existence of the JSEntry frame. |
| 4439 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 4448 __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address))); |
| 4440 __ str(r0, MemOperand(ip)); | 4449 __ str(r0, MemOperand(ip)); |
| 4441 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | 4450 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
| 4442 __ b(&exit); | 4451 __ b(&exit); |
| 4443 | 4452 |
| 4444 // Invoke: Link this frame into the handler chain. | 4453 // Invoke: Link this frame into the handler chain. |
| 4445 __ bind(&invoke); | 4454 __ bind(&invoke); |
| 4446 // Must preserve r0-r4, r5-r7 are available. | 4455 // Must preserve r0-r4, r5-r7 are available. |
| 4447 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); | 4456 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); |
| 4448 // If an exception not caught by another handler occurs, this handler | 4457 // If an exception not caught by another handler occurs, this handler |
| 4449 // returns control to the code after the bl(&invoke) above, which | 4458 // returns control to the code after the bl(&invoke) above, which |
| 4450 // restores all kCalleeSaved registers (including cp and fp) to their | 4459 // restores all kCalleeSaved registers (including cp and fp) to their |
| 4451 // saved values before returning a failure to C. | 4460 // saved values before returning a failure to C. |
| 4452 | 4461 |
| 4453 // Clear any pending exceptions. | 4462 // Clear any pending exceptions. |
| 4454 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | 4463 __ mov(ip, Operand(ExternalReference::the_hole_value_location())); |
| 4455 __ ldr(r5, MemOperand(ip)); | 4464 __ ldr(r5, MemOperand(ip)); |
| 4456 __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 4465 __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address))); |
| 4457 __ str(r5, MemOperand(ip)); | 4466 __ str(r5, MemOperand(ip)); |
| 4458 | 4467 |
| 4459 // Invoke the function by calling through JS entry trampoline builtin. | 4468 // Invoke the function by calling through JS entry trampoline builtin. |
| 4460 // Notice that we cannot store a reference to the trampoline code directly in | 4469 // Notice that we cannot store a reference to the trampoline code directly in |
| 4461 // this stub, because runtime stubs are not traversed when doing GC. | 4470 // this stub, because runtime stubs are not traversed when doing GC. |
| 4462 | 4471 |
| 4463 // Expected registers by Builtins::JSEntryTrampoline | 4472 // Expected registers by Builtins::JSEntryTrampoline |
| 4464 // r0: code entry | 4473 // r0: code entry |
| 4465 // r1: function | 4474 // r1: function |
| 4466 // r2: receiver | 4475 // r2: receiver |
| (...skipping 12 matching lines...) |
| 4479 // macro for the add instruction because we don't want the coverage tool | 4488 // macro for the add instruction because we don't want the coverage tool |
| 4480 // inserting instructions here after we read the pc. | 4489 // inserting instructions here after we read the pc. |
| 4481 __ mov(lr, Operand(pc)); | 4490 __ mov(lr, Operand(pc)); |
| 4482 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4491 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 4483 | 4492 |
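Editor's note: the mov/add pair above relies on the ARM convention that reading pc yields the address of the current instruction plus 8. So "mov lr, pc" at address A sets lr to A + 8, which is exactly the instruction following the "add pc, ..." jump, i.e. the return address. Worked out:

    // Address A    : mov lr, pc           ; lr = A + 8
    // Address A + 4: add pc, ip, #offset  ; jump into the trampoline code object
    // Address A + 8: <next instruction>   ; lr points here, so the callee's
    //                                     ; ordinary return lands after the jump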
| 4484 // Unlink this frame from the handler chain. When reading the | 4493 // Unlink this frame from the handler chain. When reading the |
| 4485 // address of the next handler, there is no need to use the address | 4494 // address of the next handler, there is no need to use the address |
| 4486 // displacement since the current stack pointer (sp) points directly | 4495 // displacement since the current stack pointer (sp) points directly |
| 4487 // to the stack handler. | 4496 // to the stack handler. |
| 4488 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); | 4497 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); |
| 4489 __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); | 4498 __ mov(ip, Operand(ExternalReference(Isolate::k_handler_address))); |
| 4490 __ str(r3, MemOperand(ip)); | 4499 __ str(r3, MemOperand(ip)); |
| 4491 // No need to restore registers | 4500 // No need to restore registers |
| 4492 __ add(sp, sp, Operand(StackHandlerConstants::kSize)); | 4501 __ add(sp, sp, Operand(StackHandlerConstants::kSize)); |
| 4493 | 4502 |
| 4494 #ifdef ENABLE_LOGGING_AND_PROFILING | 4503 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 4495 // If current FP value is the same as js_entry_sp value, it means that | 4504 // If current FP value is the same as js_entry_sp value, it means that |
| 4496 // the current function is the outermost. | 4505 // the current function is the outermost. |
| 4497 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 4506 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
| 4498 __ ldr(r6, MemOperand(r5)); | 4507 __ ldr(r6, MemOperand(r5)); |
| 4499 __ cmp(fp, Operand(r6)); | 4508 __ cmp(fp, Operand(r6)); |
| 4500 __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 4509 __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq); |
| 4501 __ str(r6, MemOperand(r5), eq); | 4510 __ str(r6, MemOperand(r5), eq); |
| 4502 #endif | 4511 #endif |
| 4503 | 4512 |
| 4504 __ bind(&exit); // r0 holds result | 4513 __ bind(&exit); // r0 holds result |
| 4505 // Restore the top frame descriptors from the stack. | 4514 // Restore the top frame descriptors from the stack. |
| 4506 __ pop(r3); | 4515 __ pop(r3); |
| 4507 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); | 4516 __ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address))); |
| 4508 __ str(r3, MemOperand(ip)); | 4517 __ str(r3, MemOperand(ip)); |
| 4509 | 4518 |
| 4510 // Reset the stack to the callee saved registers. | 4519 // Reset the stack to the callee saved registers. |
| 4511 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 4520 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
| 4512 | 4521 |
| 4513 // Restore callee-saved registers and return. | 4522 // Restore callee-saved registers and return. |
| 4514 #ifdef DEBUG | 4523 #ifdef DEBUG |
| 4515 if (FLAG_debug_code) { | 4524 if (FLAG_debug_code) { |
| 4516 __ mov(lr, Operand(pc)); | 4525 __ mov(lr, Operand(pc)); |
| 4517 } | 4526 } |
| (...skipping 136 matching lines...) |
| 4654 | 4663 |
| 4655 Label object_not_null, object_not_null_or_smi; | 4664 Label object_not_null, object_not_null_or_smi; |
| 4656 __ bind(¬_js_object); | 4665 __ bind(¬_js_object); |
| 4657 // Before null, smi and string value checks, check that the rhs is a function | 4666 // Before null, smi and string value checks, check that the rhs is a function |
| 4658 // as for a non-function rhs an exception needs to be thrown. | 4667 // as for a non-function rhs an exception needs to be thrown. |
| 4659 __ JumpIfSmi(function, &slow); | 4668 __ JumpIfSmi(function, &slow); |
| 4660 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); | 4669 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); |
| 4661 __ b(ne, &slow); | 4670 __ b(ne, &slow); |
| 4662 | 4671 |
| 4663 // Null is not instance of anything. | 4672 // Null is not instance of anything. |
| 4664 __ cmp(scratch, Operand(Factory::null_value())); | 4673 __ cmp(scratch, Operand(FACTORY->null_value())); |
| 4665 __ b(ne, &object_not_null); | 4674 __ b(ne, &object_not_null); |
| 4666 __ mov(r0, Operand(Smi::FromInt(1))); | 4675 __ mov(r0, Operand(Smi::FromInt(1))); |
| 4667 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4676 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 4668 | 4677 |
| 4669 __ bind(&object_not_null); | 4678 __ bind(&object_not_null); |
| 4670 // Smi values are not instances of anything. | 4679 // Smi values are not instances of anything. |
| 4671 __ JumpIfNotSmi(object, &object_not_null_or_smi); | 4680 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
| 4672 __ mov(r0, Operand(Smi::FromInt(1))); | 4681 __ mov(r0, Operand(Smi::FromInt(1))); |
| 4673 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4682 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 4674 | 4683 |
| (...skipping 377 matching lines...) |
| 5052 // RegExp code to avoid handling changing stack height. | 5061 // RegExp code to avoid handling changing stack height. |
| 5053 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | 5062 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); |
| 5054 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 5063 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); |
| 5055 | 5064 |
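Editor's note: the ASR by kSmiTagSize above is the standard 32-bit smi untagging: a smi stores the integer shifted left one bit with a zero tag bit. A two-line sketch (kSmiTagSize == 1, kSmiTag == 0 on this target):

    #include <stdint.h>
    int32_t SmiUntag(int32_t smi)  { return smi >> 1; }    // ASR #kSmiTagSize
    int32_t SmiTag(int32_t value)  { return value << 1; }  // tag bit stays 0
    // e.g. a previous index of 5 travels through the stub as the smi 10.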
| 5056 // r1: previous index | 5065 // r1: previous index |
| 5057 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 5066 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); |
| 5058 // r7: code | 5067 // r7: code |
| 5059 // subject: Subject string | 5068 // subject: Subject string |
| 5060 // regexp_data: RegExp data (FixedArray) | 5069 // regexp_data: RegExp data (FixedArray) |
| 5061 // All checks done. Now push arguments for native regexp code. | 5070 // All checks done. Now push arguments for native regexp code. |
| 5062 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); | 5071 __ IncrementCounter(COUNTERS->regexp_entry_native(), 1, r0, r2); |
| 5063 | 5072 |
| 5064 static const int kRegExpExecuteArguments = 7; | 5073 // Isolates: note we add an additional parameter here (isolate pointer). |
| 5074 static const int kRegExpExecuteArguments = 8; |
| 5065 static const int kParameterRegisters = 4; | 5075 static const int kParameterRegisters = 4; |
| 5066 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 5076 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
| 5067 | 5077 |
| 5068 // Stack pointer now points to cell where return address is to be written. | 5078 // Stack pointer now points to cell where return address is to be written. |
| 5069 // Arguments are before that on the stack or in registers. | 5079 // Arguments are before that on the stack or in registers. |
| 5070 | 5080 |
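Editor's note: with four parameter registers, arguments 5 through 8 of the native RegExp call go on the stack, and the isolate argument added by this change becomes argument 8 at sp + 4 * kPointerSize. The layout the following stores assume (a sketch; argument 5 is set up in lines elided from this hunk):

    // sp + 0 * kPointerSize : cell for the return address (written by the call sequence)
    // sp + 1 * kPointerSize : argument 5
    // sp + 2 * kPointerSize : argument 6  (high end of the backtracking stack area)
    // sp + 3 * kPointerSize : argument 7  (direct-call-from-JS flag)
    // sp + 4 * kPointerSize : argument 8  (isolate address, new in this change)
    // In general, argument N (N > kParameterRegisters) lives at
    //   sp + (N - kParameterRegisters) * kPointerSize.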
| 5081 // Argument 8 (sp[16]): Pass current isolate address. |
| 5082 __ mov(r0, Operand(ExternalReference::isolate_address())); |
| 5083 __ str(r0, MemOperand(sp, 4 * kPointerSize)); |
| 5084 |
| 5071 // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. | 5085 // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. |
| 5072 __ mov(r0, Operand(1)); | 5086 __ mov(r0, Operand(1)); |
| 5073 __ str(r0, MemOperand(sp, 3 * kPointerSize)); | 5087 __ str(r0, MemOperand(sp, 3 * kPointerSize)); |
| 5074 | 5088 |
| 5075 // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. | 5089 // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. |
| 5076 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | 5090 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); |
| 5077 __ ldr(r0, MemOperand(r0, 0)); | 5091 __ ldr(r0, MemOperand(r0, 0)); |
| 5078 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 5092 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); |
| 5079 __ ldr(r2, MemOperand(r2, 0)); | 5093 __ ldr(r2, MemOperand(r2, 0)); |
| 5080 __ add(r0, r0, Operand(r2)); | 5094 __ add(r0, r0, Operand(r2)); |
| (...skipping 43 matching lines...) |
| 5124 __ b(eq, &failure); | 5138 __ b(eq, &failure); |
| 5125 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 5139 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); |
| 5126 // If not exception it can only be retry. Handle that in the runtime system. | 5140 // If not exception it can only be retry. Handle that in the runtime system. |
| 5127 __ b(ne, &runtime); | 5141 __ b(ne, &runtime); |
| 5128 // Result must now be exception. If there is no pending exception already a | 5142 // Result must now be exception. If there is no pending exception already a |
| 5129 // stack overflow (on the backtrack stack) was detected in RegExp code but | 5143 // stack overflow (on the backtrack stack) was detected in RegExp code but |
| 5130 // haven't created the exception yet. Handle that in the runtime system. | 5144 // haven't created the exception yet. Handle that in the runtime system. |
| 5131 // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 5145 // TODO(592): Rerunning the RegExp to get the stack overflow exception. |
| 5132 __ mov(r1, Operand(ExternalReference::the_hole_value_location())); | 5146 __ mov(r1, Operand(ExternalReference::the_hole_value_location())); |
| 5133 __ ldr(r1, MemOperand(r1, 0)); | 5147 __ ldr(r1, MemOperand(r1, 0)); |
| 5134 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); | 5148 __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address))); |
| 5135 __ ldr(r0, MemOperand(r2, 0)); | 5149 __ ldr(r0, MemOperand(r2, 0)); |
| 5136 __ cmp(r0, r1); | 5150 __ cmp(r0, r1); |
| 5137 __ b(eq, &runtime); | 5151 __ b(eq, &runtime); |
| 5138 | 5152 |
| 5139 __ str(r1, MemOperand(r2, 0)); // Clear pending exception. | 5153 __ str(r1, MemOperand(r2, 0)); // Clear pending exception. |
| 5140 | 5154 |
| 5141 // Check if the exception is a termination. If so, throw as uncatchable. | 5155 // Check if the exception is a termination. If so, throw as uncatchable. |
| 5142 __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); | 5156 __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); |
| 5143 __ cmp(r0, ip); | 5157 __ cmp(r0, ip); |
| 5144 Label termination_exception; | 5158 Label termination_exception; |
| 5145 __ b(eq, &termination_exception); | 5159 __ b(eq, &termination_exception); |
| 5146 | 5160 |
| 5147 __ Throw(r0); // Expects thrown value in r0. | 5161 __ Throw(r0); // Expects thrown value in r0. |
| 5148 | 5162 |
| 5149 __ bind(&termination_exception); | 5163 __ bind(&termination_exception); |
| 5150 __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. | 5164 __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. |
| 5151 | 5165 |
| 5152 __ bind(&failure); | 5166 __ bind(&failure); |
| 5153 // For failure and exception return null. | 5167 // For failure and exception return null. |
| 5154 __ mov(r0, Operand(Factory::null_value())); | 5168 __ mov(r0, Operand(FACTORY->null_value())); |
| 5155 __ add(sp, sp, Operand(4 * kPointerSize)); | 5169 __ add(sp, sp, Operand(4 * kPointerSize)); |
| 5156 __ Ret(); | 5170 __ Ret(); |
| 5157 | 5171 |
| 5158 // Process the result from the native regexp code. | 5172 // Process the result from the native regexp code. |
| 5159 __ bind(&success); | 5173 __ bind(&success); |
| 5160 __ ldr(r1, | 5174 __ ldr(r1, |
| 5161 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 5175 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| 5162 // Calculate number of capture registers (number_of_captures + 1) * 2. | 5176 // Calculate number of capture registers (number_of_captures + 1) * 2. |
| 5163 STATIC_ASSERT(kSmiTag == 0); | 5177 STATIC_ASSERT(kSmiTag == 0); |
| 5164 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 5178 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| (...skipping 88 matching lines...) |
| 5253 // r0: Start of allocated area, object-tagged. | 5267 // r0: Start of allocated area, object-tagged. |
| 5254 // r1: Number of elements in array, as smi. | 5268 // r1: Number of elements in array, as smi. |
| 5255 // r5: Number of elements, untagged. | 5269 // r5: Number of elements, untagged. |
| 5256 | 5270 |
| 5257 // Set JSArray map to global.regexp_result_map(). | 5271 // Set JSArray map to global.regexp_result_map(). |
| 5258 // Set empty properties FixedArray. | 5272 // Set empty properties FixedArray. |
| 5259 // Set elements to point to FixedArray allocated right after the JSArray. | 5273 // Set elements to point to FixedArray allocated right after the JSArray. |
| 5260 // Interleave operations for better latency. | 5274 // Interleave operations for better latency. |
| 5261 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); | 5275 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 5262 __ add(r3, r0, Operand(JSRegExpResult::kSize)); | 5276 __ add(r3, r0, Operand(JSRegExpResult::kSize)); |
| 5263 __ mov(r4, Operand(Factory::empty_fixed_array())); | 5277 __ mov(r4, Operand(FACTORY->empty_fixed_array())); |
| 5264 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); | 5278 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); |
| 5265 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); | 5279 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| 5266 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); | 5280 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); |
| 5267 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 5281 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
| 5268 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5282 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 5269 | 5283 |
| 5270 // Set input, index and length fields from arguments. | 5284 // Set input, index and length fields from arguments. |
| 5271 __ ldr(r1, MemOperand(sp, kPointerSize * 0)); | 5285 __ ldr(r1, MemOperand(sp, kPointerSize * 0)); |
| 5272 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); | 5286 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); |
| 5273 __ ldr(r1, MemOperand(sp, kPointerSize * 1)); | 5287 __ ldr(r1, MemOperand(sp, kPointerSize * 1)); |
| 5274 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); | 5288 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); |
| 5275 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | 5289 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); |
| 5276 __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); | 5290 __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); |
| 5277 | 5291 |
| 5278 // Fill out the elements FixedArray. | 5292 // Fill out the elements FixedArray. |
| 5279 // r0: JSArray, tagged. | 5293 // r0: JSArray, tagged. |
| 5280 // r3: FixedArray, tagged. | 5294 // r3: FixedArray, tagged. |
| 5281 // r5: Number of elements in array, untagged. | 5295 // r5: Number of elements in array, untagged. |
| 5282 | 5296 |
| 5283 // Set map. | 5297 // Set map. |
| 5284 __ mov(r2, Operand(Factory::fixed_array_map())); | 5298 __ mov(r2, Operand(FACTORY->fixed_array_map())); |
| 5285 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 5299 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| 5286 // Set FixedArray length. | 5300 // Set FixedArray length. |
| 5287 __ mov(r6, Operand(r5, LSL, kSmiTagSize)); | 5301 __ mov(r6, Operand(r5, LSL, kSmiTagSize)); |
| 5288 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 5302 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
| 5289 // Fill contents of fixed-array with the-hole. | 5303 // Fill contents of fixed-array with the-hole. |
| 5290 __ mov(r2, Operand(Factory::the_hole_value())); | 5304 __ mov(r2, Operand(FACTORY->the_hole_value())); |
| 5291 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 5305 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 5292 // Fill fixed array elements with hole. | 5306 // Fill fixed array elements with hole. |
| 5293 // r0: JSArray, tagged. | 5307 // r0: JSArray, tagged. |
| 5294 // r2: the hole. | 5308 // r2: the hole. |
| 5295 // r3: Start of elements in FixedArray. | 5309 // r3: Start of elements in FixedArray. |
| 5296 // r5: Number of elements to fill. | 5310 // r5: Number of elements to fill. |
| 5297 Label loop; | 5311 Label loop; |
| 5298 __ tst(r5, Operand(r5)); | 5312 __ tst(r5, Operand(r5)); |
| 5299 __ bind(&loop); | 5313 __ bind(&loop); |
| 5300 __ b(le, &done); // Jump if r1 is negative or zero. | 5314 __ b(le, &done); // Jump if r1 is negative or zero. |
| (...skipping 56 matching lines...) |
| 5357 __ InvokeFunction(r1, actual, JUMP_FUNCTION); | 5371 __ InvokeFunction(r1, actual, JUMP_FUNCTION); |
| 5358 | 5372 |
| 5359 // Slow-case: Non-function called. | 5373 // Slow-case: Non-function called. |
| 5360 __ bind(&slow); | 5374 __ bind(&slow); |
| 5361 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 5375 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
| 5362 // of the original receiver from the call site). | 5376 // of the original receiver from the call site). |
| 5363 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); | 5377 __ str(r1, MemOperand(sp, argc_ * kPointerSize)); |
| 5364 __ mov(r0, Operand(argc_)); // Setup the number of arguments. | 5378 __ mov(r0, Operand(argc_)); // Setup the number of arguments. |
| 5365 __ mov(r2, Operand(0, RelocInfo::NONE)); | 5379 __ mov(r2, Operand(0, RelocInfo::NONE)); |
| 5366 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5380 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
| 5367 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5381 __ Jump(Handle<Code>(Isolate::Current()->builtins()->builtin( |
| 5382 Builtins::ArgumentsAdaptorTrampoline)), |
| 5368 RelocInfo::CODE_TARGET); | 5383 RelocInfo::CODE_TARGET); |
| 5369 } | 5384 } |
| 5370 | 5385 |
| 5371 | 5386 |
| 5372 // Unfortunately you have to run without snapshots to see most of these | 5387 // Unfortunately you have to run without snapshots to see most of these |
| 5373 // names in the profile since most compare stubs end up in the snapshot. | 5388 // names in the profile since most compare stubs end up in the snapshot. |
| 5374 const char* CompareStub::GetName() { | 5389 const char* CompareStub::GetName() { |
| 5375 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 5390 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || |
| 5376 (lhs_.is(r1) && rhs_.is(r0))); | 5391 (lhs_.is(r1) && rhs_.is(r0))); |
| 5377 | 5392 |
| 5378 if (name_ != NULL) return name_; | 5393 if (name_ != NULL) return name_; |
| 5379 const int kMaxNameLength = 100; | 5394 const int kMaxNameLength = 100; |
| 5380 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | 5395 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| 5396 kMaxNameLength); |
| 5381 if (name_ == NULL) return "OOM"; | 5397 if (name_ == NULL) return "OOM"; |
| 5382 | 5398 |
| 5383 const char* cc_name; | 5399 const char* cc_name; |
| 5384 switch (cc_) { | 5400 switch (cc_) { |
| 5385 case lt: cc_name = "LT"; break; | 5401 case lt: cc_name = "LT"; break; |
| 5386 case gt: cc_name = "GT"; break; | 5402 case gt: cc_name = "GT"; break; |
| 5387 case le: cc_name = "LE"; break; | 5403 case le: cc_name = "LE"; break; |
| 5388 case ge: cc_name = "GE"; break; | 5404 case ge: cc_name = "GE"; break; |
| 5389 case eq: cc_name = "EQ"; break; | 5405 case eq: cc_name = "EQ"; break; |
| 5390 case ne: cc_name = "NE"; break; | 5406 case ne: cc_name = "NE"; break; |
| (...skipping 773 matching lines...) |
| 6164 // Sub string of length 2 requested. | 6180 // Sub string of length 2 requested. |
| 6165 // Get the two characters forming the sub string. | 6181 // Get the two characters forming the sub string. |
| 6166 __ add(r5, r5, Operand(r3)); | 6182 __ add(r5, r5, Operand(r3)); |
| 6167 __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); | 6183 __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); |
| 6168 __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); | 6184 __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); |
| 6169 | 6185 |
| 6170 // Try to lookup two character string in symbol table. | 6186 // Try to lookup two character string in symbol table. |
| 6171 Label make_two_character_string; | 6187 Label make_two_character_string; |
| 6172 StringHelper::GenerateTwoCharacterSymbolTableProbe( | 6188 StringHelper::GenerateTwoCharacterSymbolTableProbe( |
| 6173 masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); | 6189 masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); |
| 6174 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 6190 __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4); |
| 6175 __ add(sp, sp, Operand(3 * kPointerSize)); | 6191 __ add(sp, sp, Operand(3 * kPointerSize)); |
| 6176 __ Ret(); | 6192 __ Ret(); |
| 6177 | 6193 |
| 6178 // r2: result string length. | 6194 // r2: result string length. |
| 6179 // r3: two characters combined into halfword in little endian byte order. | 6195 // r3: two characters combined into halfword in little endian byte order. |
| 6180 __ bind(&make_two_character_string); | 6196 __ bind(&make_two_character_string); |
| 6181 __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); | 6197 __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); |
| 6182 __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 6198 __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); |
| 6183 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 6199 __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4); |
| 6184 __ add(sp, sp, Operand(3 * kPointerSize)); | 6200 __ add(sp, sp, Operand(3 * kPointerSize)); |
| 6185 __ Ret(); | 6201 __ Ret(); |
| 6186 | 6202 |
| 6187 __ bind(&result_longer_than_two); | 6203 __ bind(&result_longer_than_two); |
| 6188 | 6204 |
| 6189 // Allocate the result. | 6205 // Allocate the result. |
| 6190 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); | 6206 __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); |
| 6191 | 6207 |
| 6192 // r0: result string. | 6208 // r0: result string. |
| 6193 // r2: result string length. | 6209 // r2: result string length. |
| 6194 // r5: string. | 6210 // r5: string. |
| 6195 // r7 (a.k.a. from): from offset (smi) | 6211 // r7 (a.k.a. from): from offset (smi) |
| 6196 // Locate first character of result. | 6212 // Locate first character of result. |
| 6197 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 6213 __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 6198 // Locate 'from' character of string. | 6214 // Locate 'from' character of string. |
| 6199 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 6215 __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 6200 __ add(r5, r5, Operand(from, ASR, 1)); | 6216 __ add(r5, r5, Operand(from, ASR, 1)); |
| 6201 | 6217 |
| 6202 // r0: result string. | 6218 // r0: result string. |
| 6203 // r1: first character of result string. | 6219 // r1: first character of result string. |
| 6204 // r2: result string length. | 6220 // r2: result string length. |
| 6205 // r5: first character of sub string to copy. | 6221 // r5: first character of sub string to copy. |
| 6206 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); | 6222 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 6207 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | 6223 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, |
| 6208 COPY_ASCII | DEST_ALWAYS_ALIGNED); | 6224 COPY_ASCII | DEST_ALWAYS_ALIGNED); |
| 6209 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 6225 __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4); |
| 6210 __ add(sp, sp, Operand(3 * kPointerSize)); | 6226 __ add(sp, sp, Operand(3 * kPointerSize)); |
| 6211 __ Ret(); | 6227 __ Ret(); |
| 6212 | 6228 |
| 6213 __ bind(&non_ascii_flat); | 6229 __ bind(&non_ascii_flat); |
| 6214 // r2: result string length. | 6230 // r2: result string length. |
| 6215 // r5: string. | 6231 // r5: string. |
| 6216 // r7 (a.k.a. from): from offset (smi) | 6232 // r7 (a.k.a. from): from offset (smi) |
| 6217 // Check for flat two byte string. | 6233 // Check for flat two byte string. |
| 6218 | 6234 |
| 6219 // Allocate the result. | 6235 // Allocate the result. |
| (...skipping 11 matching lines...) |
| 6231 __ add(r5, r5, Operand(from)); | 6247 __ add(r5, r5, Operand(from)); |
| 6232 from = no_reg; | 6248 from = no_reg; |
| 6233 | 6249 |
| 6234 // r0: result string. | 6250 // r0: result string. |
| 6235 // r1: first character of result. | 6251 // r1: first character of result. |
| 6236 // r2: result length. | 6252 // r2: result length. |
| 6237 // r5: first character of string to copy. | 6253 // r5: first character of string to copy. |
| 6238 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 6254 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 6239 StringHelper::GenerateCopyCharactersLong( | 6255 StringHelper::GenerateCopyCharactersLong( |
| 6240 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); | 6256 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); |
| 6241 __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 6257 __ IncrementCounter(COUNTERS->sub_string_native(), 1, r3, r4); |
| 6242 __ add(sp, sp, Operand(3 * kPointerSize)); | 6258 __ add(sp, sp, Operand(3 * kPointerSize)); |
| 6243 __ Ret(); | 6259 __ Ret(); |
| 6244 | 6260 |
| 6245 // Just jump to runtime to create the sub string. | 6261 // Just jump to runtime to create the sub string. |
| 6246 __ bind(&runtime); | 6262 __ bind(&runtime); |
| 6247 __ TailCallRuntime(Runtime::kSubString, 3, 1); | 6263 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
| 6248 } | 6264 } |
| 6249 | 6265 |
| 6250 | 6266 |
| 6251 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 6267 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| (...skipping 62 matching lines...) |
| 6314 // sp[0]: right string | 6330 // sp[0]: right string |
| 6315 // sp[4]: left string | 6331 // sp[4]: left string |
| 6316 __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1. | 6332 __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1. |
| 6317 | 6333 |
| 6318 Label not_same; | 6334 Label not_same; |
| 6319 __ cmp(r0, r1); | 6335 __ cmp(r0, r1); |
| 6320 __ b(ne, ¬_same); | 6336 __ b(ne, ¬_same); |
| 6321 STATIC_ASSERT(EQUAL == 0); | 6337 STATIC_ASSERT(EQUAL == 0); |
| 6322 STATIC_ASSERT(kSmiTag == 0); | 6338 STATIC_ASSERT(kSmiTag == 0); |
| 6323 __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 6339 __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
| 6324 __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2); | 6340 __ IncrementCounter(COUNTERS->string_compare_native(), 1, r1, r2); |
| 6325 __ add(sp, sp, Operand(2 * kPointerSize)); | 6341 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6326 __ Ret(); | 6342 __ Ret(); |
| 6327 | 6343 |
| 6328 __ bind(¬_same); | 6344 __ bind(¬_same); |
| 6329 | 6345 |
| 6330 // Check that both objects are sequential ASCII strings. | 6346 // Check that both objects are sequential ASCII strings. |
| 6331 __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); | 6347 __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); |
| 6332 | 6348 |
| 6333 // Compare flat ASCII strings natively. Remove arguments from stack first. | 6349 // Compare flat ASCII strings natively. Remove arguments from stack first. |
| 6334 __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | 6350 __ IncrementCounter(COUNTERS->string_compare_native(), 1, r2, r3); |
| 6335 __ add(sp, sp, Operand(2 * kPointerSize)); | 6351 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6336 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); | 6352 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); |
| 6337 | 6353 |
| 6338 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 6354 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) |
| 6339 // tagged as a small integer. | 6355 // tagged as a small integer. |
| 6340 __ bind(&runtime); | 6356 __ bind(&runtime); |
| 6341 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 6357 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 6342 } | 6358 } |
| 6343 | 6359 |
| 6344 | 6360 |
| (...skipping 49 matching lines...) |
| 6394 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); | 6410 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); |
| 6395 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); | 6411 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); |
| 6396 STATIC_ASSERT(kSmiTag == 0); | 6412 STATIC_ASSERT(kSmiTag == 0); |
| 6397 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. | 6413 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. |
| 6398 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. | 6414 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. |
| 6399 STATIC_ASSERT(kSmiTag == 0); | 6415 STATIC_ASSERT(kSmiTag == 0); |
| 6400 // Else test if second string is empty. | 6416 // Else test if second string is empty. |
| 6401 __ cmp(r3, Operand(Smi::FromInt(0)), ne); | 6417 __ cmp(r3, Operand(Smi::FromInt(0)), ne); |
| 6402 __ b(ne, &strings_not_empty); // If either string was empty, return r0. | 6418 __ b(ne, &strings_not_empty); // If either string was empty, return r0. |
| 6403 | 6419 |
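Editor's note: the three predicated instructions above implement the empty-string fast path without an early branch; LeaveCC keeps the flags of the first compare alive, and the second compare only executes when the first string was non-empty. Equivalent control flow as a C++ sketch:

    #include <string>
    // Equivalent behaviour of the predicated sequence above.
    std::string AddStrings(const std::string& first, const std::string& second) {
      if (first.empty())  return second;   // mov r0, r1 (eq)
      if (second.empty()) return first;    // r0 still holds the first string
      return first + second;               // &strings_not_empty: real concatenation
    }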
| 6404 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6420 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6405 __ add(sp, sp, Operand(2 * kPointerSize)); | 6421 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6406 __ Ret(); | 6422 __ Ret(); |
| 6407 | 6423 |
| 6408 __ bind(&strings_not_empty); | 6424 __ bind(&strings_not_empty); |
| 6409 } | 6425 } |
| 6410 | 6426 |
| 6411 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); | 6427 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); |
| 6412 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); | 6428 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); |
| 6413 // Both strings are non-empty. | 6429 // Both strings are non-empty. |
| 6414 // r0: first string | 6430 // r0: first string |
| (...skipping 24 matching lines...) |
| 6439 | 6455 |
| 6440 // Get the two characters forming the sub string. | 6456 // Get the two characters forming the sub string. |
| 6441 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 6457 __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); |
| 6442 __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); | 6458 __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); |
| 6443 | 6459 |
| 6444 // Try to lookup two character string in symbol table. If it is not found | 6460 // Try to lookup two character string in symbol table. If it is not found |
| 6445 // just allocate a new one. | 6461 // just allocate a new one. |
| 6446 Label make_two_character_string; | 6462 Label make_two_character_string; |
| 6447 StringHelper::GenerateTwoCharacterSymbolTableProbe( | 6463 StringHelper::GenerateTwoCharacterSymbolTableProbe( |
| 6448 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); | 6464 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); |
| 6449 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6465 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6450 __ add(sp, sp, Operand(2 * kPointerSize)); | 6466 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6451 __ Ret(); | 6467 __ Ret(); |
| 6452 | 6468 |
| 6453 __ bind(&make_two_character_string); | 6469 __ bind(&make_two_character_string); |
| 6454 // Resulting string has length 2 and first chars of two strings | 6470 // Resulting string has length 2 and first chars of two strings |
| 6455 // are combined into single halfword in r2 register. | 6471 // are combined into single halfword in r2 register. |
| 6456 // So we can fill resulting string without two loops by a single | 6472 // So we can fill resulting string without two loops by a single |
| 6457 // halfword store instruction (which assumes that processor is | 6473 // halfword store instruction (which assumes that processor is |
| 6458 // in a little endian mode) | 6474 // in a little endian mode) |
| 6459 __ mov(r6, Operand(2)); | 6475 __ mov(r6, Operand(2)); |
| 6460 __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); | 6476 __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); |
| 6461 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 6477 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); |
| 6462 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6478 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6463 __ add(sp, sp, Operand(2 * kPointerSize)); | 6479 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6464 __ Ret(); | 6480 __ Ret(); |
| 6465 | 6481 |
| 6466 __ bind(&longer_than_two); | 6482 __ bind(&longer_than_two); |
| 6467 // Check if resulting string will be flat. | 6483 // Check if resulting string will be flat. |
| 6468 __ cmp(r6, Operand(String::kMinNonFlatLength)); | 6484 __ cmp(r6, Operand(String::kMinNonFlatLength)); |
| 6469 __ b(lt, &string_add_flat_result); | 6485 __ b(lt, &string_add_flat_result); |
| 6470 // Handle exceptionally long strings in the runtime system. | 6486 // Handle exceptionally long strings in the runtime system. |
| 6471 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); | 6487 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); |
| 6472 ASSERT(IsPowerOf2(String::kMaxLength + 1)); | 6488 ASSERT(IsPowerOf2(String::kMaxLength + 1)); |
| (...skipping 16 matching lines...) |
| 6489 __ b(eq, &non_ascii); | 6505 __ b(eq, &non_ascii); |
| 6490 | 6506 |
| 6491 // Allocate an ASCII cons string. | 6507 // Allocate an ASCII cons string. |
| 6492 __ bind(&ascii_data); | 6508 __ bind(&ascii_data); |
| 6493 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); | 6509 __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); |
| 6494 __ bind(&allocated); | 6510 __ bind(&allocated); |
| 6495 // Fill the fields of the cons string. | 6511 // Fill the fields of the cons string. |
| 6496 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 6512 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); |
| 6497 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 6513 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); |
| 6498 __ mov(r0, Operand(r7)); | 6514 __ mov(r0, Operand(r7)); |
| 6499 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6515 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6500 __ add(sp, sp, Operand(2 * kPointerSize)); | 6516 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6501 __ Ret(); | 6517 __ Ret(); |
| 6502 | 6518 |
| 6503 __ bind(&non_ascii); | 6519 __ bind(&non_ascii); |
| 6504 // At least one of the strings is two-byte. Check whether it happens | 6520 // At least one of the strings is two-byte. Check whether it happens |
| 6505 // to contain only ASCII characters. | 6521 // to contain only ASCII characters. |
| 6506 // r4: first instance type. | 6522 // r4: first instance type. |
| 6507 // r5: second instance type. | 6523 // r5: second instance type. |
| 6508 __ tst(r4, Operand(kAsciiDataHintMask)); | 6524 __ tst(r4, Operand(kAsciiDataHintMask)); |
| 6509 __ tst(r5, Operand(kAsciiDataHintMask), ne); | 6525 __ tst(r5, Operand(kAsciiDataHintMask), ne); |
| (...skipping 61 matching lines...) |
| 6571 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); | 6587 StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); |
| 6572 | 6588 |
| 6573 // Load second argument and locate first character. | 6589 // Load second argument and locate first character. |
| 6574 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 6590 __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 6575 // r1: first character of second string. | 6591 // r1: first character of second string. |
| 6576 // r3: length of second string. | 6592 // r3: length of second string. |
| 6577 // r6: next character of result. | 6593 // r6: next character of result. |
| 6578 // r7: result string. | 6594 // r7: result string. |
| 6579 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); | 6595 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); |
| 6580 __ mov(r0, Operand(r7)); | 6596 __ mov(r0, Operand(r7)); |
| 6581 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6597 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6582 __ add(sp, sp, Operand(2 * kPointerSize)); | 6598 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6583 __ Ret(); | 6599 __ Ret(); |
| 6584 | 6600 |
| 6585 __ bind(&non_ascii_string_add_flat_result); | 6601 __ bind(&non_ascii_string_add_flat_result); |
| 6586 // Both strings are sequential two byte strings. | 6602 // Both strings are sequential two byte strings. |
| 6587 // r0: first string. | 6603 // r0: first string. |
| 6588 // r1: second string. | 6604 // r1: second string. |
| 6589 // r2: length of first string. | 6605 // r2: length of first string. |
| 6590 // r3: length of second string. | 6606 // r3: length of second string. |
| 6591 // r6: sum of length of strings. | 6607 // r6: sum of length of strings. |
| (...skipping 20 matching lines...) |
| 6612 // Locate first character of second argument. | 6628 // Locate first character of second argument. |
| 6613 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 6629 __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 6614 | 6630 |
| 6615 // r1: first character of second string. | 6631 // r1: first character of second string. |
| 6616 // r3: length of second string. | 6632 // r3: length of second string. |
| 6617 // r6: next character of result (after copy of first string). | 6633 // r6: next character of result (after copy of first string). |
| 6618 // r7: result string. | 6634 // r7: result string. |
| 6619 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); | 6635 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); |
| 6620 | 6636 |
| 6621 __ mov(r0, Operand(r7)); | 6637 __ mov(r0, Operand(r7)); |
| 6622 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 6638 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); |
| 6623 __ add(sp, sp, Operand(2 * kPointerSize)); | 6639 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 6624 __ Ret(); | 6640 __ Ret(); |
| 6625 | 6641 |
| 6626 // Just jump to runtime to add the two strings. | 6642 // Just jump to runtime to add the two strings. |
| 6627 __ bind(&string_add_runtime); | 6643 __ bind(&string_add_runtime); |
| 6628 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 6644 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| 6629 | 6645 |
| 6630 if (call_builtin.is_linked()) { | 6646 if (call_builtin.is_linked()) { |
| 6631 __ bind(&call_builtin); | 6647 __ bind(&call_builtin); |
| 6632 __ InvokeBuiltin(builtin_id, JUMP_JS); | 6648 __ InvokeBuiltin(builtin_id, JUMP_JS); |
| (...skipping 82 matching lines...) |
| 6715 __ tst(r2, Operand(kSmiTagMask)); | 6731 __ tst(r2, Operand(kSmiTagMask)); |
| 6716 __ b(eq, &generic_stub); | 6732 __ b(eq, &generic_stub); |
| 6717 | 6733 |
| 6718 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); | 6734 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); |
| 6719 __ b(ne, &miss); | 6735 __ b(ne, &miss); |
| 6720 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 6736 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); |
| 6721 __ b(ne, &miss); | 6737 __ b(ne, &miss); |
| 6722 | 6738 |
| 6723 // Inlining the double comparison and falling back to the general compare | 6739 // Inlining the double comparison and falling back to the general compare |
| 6724 // stub if NaN is involved or VFP3 is unsupported. | 6740 // stub if NaN is involved or VFP3 is unsupported. |
| 6725 if (CpuFeatures::IsSupported(VFP3)) { | 6741 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { |
| 6726 CpuFeatures::Scope scope(VFP3); | 6742 CpuFeatures::Scope scope(VFP3); |
| 6727 | 6743 |
| 6728 // Load left and right operand | 6744 // Load left and right operand |
| 6729 __ sub(r2, r1, Operand(kHeapObjectTag)); | 6745 __ sub(r2, r1, Operand(kHeapObjectTag)); |
| 6730 __ vldr(d0, r2, HeapNumber::kValueOffset); | 6746 __ vldr(d0, r2, HeapNumber::kValueOffset); |
| 6731 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6747 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 6732 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6748 __ vldr(d1, r2, HeapNumber::kValueOffset); |
| 6733 | 6749 |
| 6734 // Compare operands | 6750 // Compare operands |
| 6735 __ VFPCompareAndSetFlags(d0, d1); | 6751 __ VFPCompareAndSetFlags(d0, d1); |
| (...skipping 86 matching lines...) |
| 6822 __ str(pc, MemOperand(sp, 0)); | 6838 __ str(pc, MemOperand(sp, 0)); |
| 6823 __ Jump(target); // Call the C++ function. | 6839 __ Jump(target); // Call the C++ function. |
| 6824 } | 6840 } |
| 6825 | 6841 |
| 6826 | 6842 |
| 6827 #undef __ | 6843 #undef __ |
| 6828 | 6844 |
| 6829 } } // namespace v8::internal | 6845 } } // namespace v8::internal |
| 6830 | 6846 |
| 6831 #endif // V8_TARGET_ARCH_ARM | 6847 #endif // V8_TARGET_ARCH_ARM |