Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 10818026: Relax requirement from VFP3 to VFP2 where possible. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years, 5 months ago
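Note: the substantive change in this patch set is mechanical. The double-precision arithmetic, moves, conversions, and loads/stores these stubs emit (vadd, vsub, vmul, vdiv, vmov, vcvt, vldr/vstr, vsqrt) are all VFPv2 instructions, so feature guards and scopes that only protect such code can be relaxed from VFP3 to VFP2. A minimal sketch of the recurring rewrite, using a hypothetical EmitDoubleAdd helper rather than any stub below:

    #define __ ACCESS_MASM(masm)

    void EmitDoubleAdd(MacroAssembler* masm) {
      if (CpuFeatures::IsSupported(VFP2)) {  // was: IsSupported(VFP3)
        CpuFeatures::Scope scope(VFP2);      // was: Scope scope(VFP3)
        __ vadd(d5, d6, d7);                 // vadd already exists in VFPv2
      } else {
        // The core-register fallback paths are left untouched.
      }
    }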
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 581 matching lines...)
592 exponent, 592 exponent,
593 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); 593 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
594 __ Ret(); 594 __ Ret();
595 } 595 }
596 596
597 597
598 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, 598 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
599 FloatingPointHelper::Destination destination, 599 FloatingPointHelper::Destination destination,
600 Register scratch1, 600 Register scratch1,
601 Register scratch2) { 601 Register scratch2) {
602 if (CpuFeatures::IsSupported(VFP3)) { 602 if (CpuFeatures::IsSupported(VFP2)) {
603 CpuFeatures::Scope scope(VFP3); 603 CpuFeatures::Scope scope(VFP2);
604 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); 604 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
605 __ vmov(d7.high(), scratch1); 605 __ vmov(d7.high(), scratch1);
606 __ vcvt_f64_s32(d7, d7.high()); 606 __ vcvt_f64_s32(d7, d7.high());
607 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); 607 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
608 __ vmov(d6.high(), scratch1); 608 __ vmov(d6.high(), scratch1);
609 __ vcvt_f64_s32(d6, d6.high()); 609 __ vcvt_f64_s32(d6, d6.high());
610 if (destination == kCoreRegisters) { 610 if (destination == kCoreRegisters) {
611 __ vmov(r2, r3, d7); 611 __ vmov(r2, r3, d7);
612 __ vmov(r0, r1, d6); 612 __ vmov(r0, r1, d6);
613 } 613 }
(...skipping 48 matching lines...)
662 } 662 }
663 663
664 Label is_smi, done; 664 Label is_smi, done;
665 665
666 // Smi-check 666 // Smi-check
667 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); 667 __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
668 // Heap number check 668 // Heap number check
669 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); 669 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
670 670
671 // Handle loading a double from a heap number. 671 // Handle loading a double from a heap number.
672 if (CpuFeatures::IsSupported(VFP3) && 672 if (CpuFeatures::IsSupported(VFP2) &&
673 destination == kVFPRegisters) { 673 destination == kVFPRegisters) {
674 CpuFeatures::Scope scope(VFP3); 674 CpuFeatures::Scope scope(VFP2);
675 // Load the double from tagged HeapNumber to double register. 675 // Load the double from tagged HeapNumber to double register.
676 __ sub(scratch1, object, Operand(kHeapObjectTag)); 676 __ sub(scratch1, object, Operand(kHeapObjectTag));
677 __ vldr(dst, scratch1, HeapNumber::kValueOffset); 677 __ vldr(dst, scratch1, HeapNumber::kValueOffset);
678 } else { 678 } else {
679 ASSERT(destination == kCoreRegisters); 679 ASSERT(destination == kCoreRegisters);
680 // Load the double from heap number to dst1 and dst2 in double format. 680 // Load the double from heap number to dst1 and dst2 in double format.
681 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); 681 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
682 } 682 }
683 __ jmp(&done); 683 __ jmp(&done);
684 684
685 // Handle loading a double from a smi. 685 // Handle loading a double from a smi.
686 __ bind(&is_smi); 686 __ bind(&is_smi);
687 if (CpuFeatures::IsSupported(VFP3)) { 687 if (CpuFeatures::IsSupported(VFP2)) {
688 CpuFeatures::Scope scope(VFP3); 688 CpuFeatures::Scope scope(VFP2);
689 // Convert smi to double using VFP instructions. 689 // Convert smi to double using VFP instructions.
690 __ vmov(dst.high(), scratch1); 690 __ vmov(dst.high(), scratch1);
691 __ vcvt_f64_s32(dst, dst.high()); 691 __ vcvt_f64_s32(dst, dst.high());
692 if (destination == kCoreRegisters) { 692 if (destination == kCoreRegisters) {
693 // Load the converted smi to dst1 and dst2 in double format. 693 // Load the converted smi to dst1 and dst2 in double format.
694 __ vmov(dst1, dst2, dst); 694 __ vmov(dst1, dst2, dst);
695 } 695 }
696 } else { 696 } else {
697 ASSERT(destination == kCoreRegisters); 697 ASSERT(destination == kCoreRegisters);
698 // Write smi to dst1 and dst2 double format. 698 // Write smi to dst1 and dst2 double format.
(...skipping 56 matching lines...)
755 Register dst1, 755 Register dst1,
756 Register dst2, 756 Register dst2,
757 Register scratch2, 757 Register scratch2,
758 SwVfpRegister single_scratch) { 758 SwVfpRegister single_scratch) {
759 ASSERT(!int_scratch.is(scratch2)); 759 ASSERT(!int_scratch.is(scratch2));
760 ASSERT(!int_scratch.is(dst1)); 760 ASSERT(!int_scratch.is(dst1));
761 ASSERT(!int_scratch.is(dst2)); 761 ASSERT(!int_scratch.is(dst2));
762 762
763 Label done; 763 Label done;
764 764
765 if (CpuFeatures::IsSupported(VFP3)) { 765 if (CpuFeatures::IsSupported(VFP2)) {
766 CpuFeatures::Scope scope(VFP3); 766 CpuFeatures::Scope scope(VFP2);
767 __ vmov(single_scratch, int_scratch); 767 __ vmov(single_scratch, int_scratch);
768 __ vcvt_f64_s32(double_dst, single_scratch); 768 __ vcvt_f64_s32(double_dst, single_scratch);
769 if (destination == kCoreRegisters) { 769 if (destination == kCoreRegisters) {
770 __ vmov(dst1, dst2, double_dst); 770 __ vmov(dst1, dst2, double_dst);
771 } 771 }
772 } else { 772 } else {
773 Label fewer_than_20_useful_bits; 773 Label fewer_than_20_useful_bits;
774 // Expected output: 774 // Expected output:
775 // | dst2 | dst1 | 775 // | dst2 | dst1 |
776 // | s | exp | mantissa | 776 // | s | exp | mantissa |
(...skipping 72 matching lines...)
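Note: the layout sketched in the "Expected output" comment above is just the IEEE 754 double encoding split across two core registers: dst2 receives the high word (sign bit, 11 exponent bits, and the top 20 of the 52 mantissa bits) and dst1 the low 32 mantissa bits, matching vmov(dst1, dst2, dst) elsewhere in this file. A standalone sketch (not V8 code), assuming a little-endian host:

    #include <stdint.h>
    #include <string.h>

    static void SplitDouble(double v, uint32_t* dst1, uint32_t* dst2) {
      uint64_t bits;
      memcpy(&bits, &v, sizeof bits);             // raw IEEE 754 bits
      *dst1 = static_cast<uint32_t>(bits);        // low mantissa word
      *dst2 = static_cast<uint32_t>(bits >> 32);  // sign | exp | mantissa top
    }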
849 849
850 __ bind(&obj_is_not_smi); 850 __ bind(&obj_is_not_smi);
851 if (FLAG_debug_code) { 851 if (FLAG_debug_code) {
852 __ AbortIfNotRootValue(heap_number_map, 852 __ AbortIfNotRootValue(heap_number_map,
853 Heap::kHeapNumberMapRootIndex, 853 Heap::kHeapNumberMapRootIndex,
854 "HeapNumberMap register clobbered."); 854 "HeapNumberMap register clobbered.");
855 } 855 }
856 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); 856 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
857 857
858 // Load the number. 858 // Load the number.
859 if (CpuFeatures::IsSupported(VFP3)) { 859 if (CpuFeatures::IsSupported(VFP2)) {
860 CpuFeatures::Scope scope(VFP3); 860 CpuFeatures::Scope scope(VFP2);
861 // Load the double value. 861 // Load the double value.
862 __ sub(scratch1, object, Operand(kHeapObjectTag)); 862 __ sub(scratch1, object, Operand(kHeapObjectTag));
863 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); 863 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
864 864
865 __ EmitVFPTruncate(kRoundToZero, 865 __ EmitVFPTruncate(kRoundToZero,
866 single_scratch, 866 single_scratch,
867 double_dst, 867 double_dst,
868 scratch1, 868 scratch1,
869 scratch2, 869 scratch2,
870 kCheckForInexactConversion); 870 kCheckForInexactConversion);
(...skipping 49 matching lines...)
920 920
921 if (FLAG_debug_code) { 921 if (FLAG_debug_code) {
922 __ AbortIfNotRootValue(heap_number_map, 922 __ AbortIfNotRootValue(heap_number_map,
923 Heap::kHeapNumberMapRootIndex, 923 Heap::kHeapNumberMapRootIndex,
924 "HeapNumberMap register clobbered."); 924 "HeapNumberMap register clobbered.");
925 } 925 }
926 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); 926 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
927 927
928 // Object is a heap number. 928 // Object is a heap number.
929 // Convert the floating point value to a 32-bit integer. 929 // Convert the floating point value to a 32-bit integer.
930 if (CpuFeatures::IsSupported(VFP3)) { 930 if (CpuFeatures::IsSupported(VFP2)) {
931 CpuFeatures::Scope scope(VFP3); 931 CpuFeatures::Scope scope(VFP2);
932 SwVfpRegister single_scratch = double_scratch.low(); 932 SwVfpRegister single_scratch = double_scratch.low();
933 // Load the double value. 933 // Load the double value.
934 __ sub(scratch1, object, Operand(kHeapObjectTag)); 934 __ sub(scratch1, object, Operand(kHeapObjectTag));
935 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); 935 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
936 936
937 __ EmitVFPTruncate(kRoundToZero, 937 __ EmitVFPTruncate(kRoundToZero,
938 single_scratch, 938 single_scratch,
939 double_scratch, 939 double_scratch,
940 scratch1, 940 scratch1,
941 scratch2, 941 scratch2,
(...skipping 109 matching lines...)
1051 1051
1052 // Assert that heap_number_result is callee-saved. 1052 // Assert that heap_number_result is callee-saved.
1053 // We currently always use r5 to pass it. 1053 // We currently always use r5 to pass it.
1054 ASSERT(heap_number_result.is(r5)); 1054 ASSERT(heap_number_result.is(r5));
1055 1055
1056 // Push the current return address before the C call. Return will be 1056 // Push the current return address before the C call. Return will be
1057 // through pop(pc) below. 1057 // through pop(pc) below.
1058 __ push(lr); 1058 __ push(lr);
1059 __ PrepareCallCFunction(0, 2, scratch); 1059 __ PrepareCallCFunction(0, 2, scratch);
1060 if (masm->use_eabi_hardfloat()) { 1060 if (masm->use_eabi_hardfloat()) {
1061 CpuFeatures::Scope scope(VFP3); 1061 CpuFeatures::Scope scope(VFP2);
1062 __ vmov(d0, r0, r1); 1062 __ vmov(d0, r0, r1);
1063 __ vmov(d1, r2, r3); 1063 __ vmov(d1, r2, r3);
1064 } 1064 }
1065 { 1065 {
1066 AllowExternalCallThatCantCauseGC scope(masm); 1066 AllowExternalCallThatCantCauseGC scope(masm);
1067 __ CallCFunction( 1067 __ CallCFunction(
1068 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); 1068 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1069 } 1069 }
1070 // Store answer in the overwritable heap number. Double returned in 1070 // Store answer in the overwritable heap number. Double returned in
1071 // registers r0 and r1 or in d0. 1071 // registers r0 and r1 or in d0.
1072 if (masm->use_eabi_hardfloat()) { 1072 if (masm->use_eabi_hardfloat()) {
1073 CpuFeatures::Scope scope(VFP3); 1073 CpuFeatures::Scope scope(VFP2);
1074 __ vstr(d0, 1074 __ vstr(d0,
1075 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); 1075 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1076 } else { 1076 } else {
1077 __ Strd(r0, r1, FieldMemOperand(heap_number_result, 1077 __ Strd(r0, r1, FieldMemOperand(heap_number_result,
1078 HeapNumber::kValueOffset)); 1078 HeapNumber::kValueOffset));
1079 } 1079 }
1080 // Place heap_number_result in r0 and return to the pushed return address. 1080 // Place heap_number_result in r0 and return to the pushed return address.
1081 __ mov(r0, Operand(heap_number_result)); 1081 __ mov(r0, Operand(heap_number_result));
1082 __ pop(pc); 1082 __ pop(pc);
1083 } 1083 }
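Note: the use_eabi_hardfloat() branches above bridge the two ARM EABI variants. PrepareCallCFunction(0, 2, ...) declares two double arguments, so the C target effectively has the signature double f(double, double); under the base (softfp) ABI its operands travel in the core pairs r0/r1 and r2/r3 with the result in r0/r1, while under the hard-float variant the vmov pair places them in d0/d1 and the result comes back in d0, which is why the store-back code checks the same predicate.

    // Effective C signature of the callee (name hypothetical):
    //   double fp_op(double x, double y);
    // softfp:  x in r0/r1, y in r2/r3, result in r0/r1
    // hardfp:  x in d0,    y in d1,    result in d0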
(...skipping 198 matching lines...)
1282 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); 1282 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1283 } 1283 }
1284 __ Ret(ne); 1284 __ Ret(ne);
1285 } else { 1285 } else {
1286 // Smi compared non-strictly with a non-Smi non-heap-number. Call 1286 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1287 // the runtime. 1287 // the runtime.
1288 __ b(ne, slow); 1288 __ b(ne, slow);
1289 } 1289 }
1290 1290
1291 // Lhs is a smi, rhs is a number. 1291 // Lhs is a smi, rhs is a number.
1292 if (CpuFeatures::IsSupported(VFP3)) { 1292 if (CpuFeatures::IsSupported(VFP2)) {
1293 // Convert lhs to a double in d7. 1293 // Convert lhs to a double in d7.
1294 CpuFeatures::Scope scope(VFP3); 1294 CpuFeatures::Scope scope(VFP2);
1295 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); 1295 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
1296 // Load the double from rhs, tagged HeapNumber r0, to d6. 1296 // Load the double from rhs, tagged HeapNumber r0, to d6.
1297 __ sub(r7, rhs, Operand(kHeapObjectTag)); 1297 __ sub(r7, rhs, Operand(kHeapObjectTag));
1298 __ vldr(d6, r7, HeapNumber::kValueOffset); 1298 __ vldr(d6, r7, HeapNumber::kValueOffset);
1299 } else { 1299 } else {
1300 __ push(lr); 1300 __ push(lr);
1301 // Convert lhs to a double in r2, r3. 1301 // Convert lhs to a double in r2, r3.
1302 __ mov(r7, Operand(lhs)); 1302 __ mov(r7, Operand(lhs));
1303 ConvertToDoubleStub stub1(r3, r2, r7, r6); 1303 ConvertToDoubleStub stub1(r3, r2, r7, r6);
1304 __ Call(stub1.GetCode()); 1304 __ Call(stub1.GetCode());
(...skipping 17 matching lines...)
1322 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); 1322 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1323 } 1323 }
1324 __ Ret(ne); 1324 __ Ret(ne);
1325 } else { 1325 } else {
1326 // Smi compared non-strictly with a non-smi non-heap-number. Call 1326 // Smi compared non-strictly with a non-smi non-heap-number. Call
1327 // the runtime. 1327 // the runtime.
1328 __ b(ne, slow); 1328 __ b(ne, slow);
1329 } 1329 }
1330 1330
1331 // Rhs is a smi, lhs is a heap number. 1331 // Rhs is a smi, lhs is a heap number.
1332 if (CpuFeatures::IsSupported(VFP3)) { 1332 if (CpuFeatures::IsSupported(VFP2)) {
1333 CpuFeatures::Scope scope(VFP3); 1333 CpuFeatures::Scope scope(VFP2);
1334 // Load the double from lhs, tagged HeapNumber r1, to d7. 1334 // Load the double from lhs, tagged HeapNumber r1, to d7.
1335 __ sub(r7, lhs, Operand(kHeapObjectTag)); 1335 __ sub(r7, lhs, Operand(kHeapObjectTag));
1336 __ vldr(d7, r7, HeapNumber::kValueOffset); 1336 __ vldr(d7, r7, HeapNumber::kValueOffset);
1337 // Convert rhs to a double in d6. 1337 // Convert rhs to a double in d6.
1338 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); 1338 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
1339 } else { 1339 } else {
1340 __ push(lr); 1340 __ push(lr);
1341 // Load lhs to a double in r2, r3. 1341 // Load lhs to a double in r2, r3.
1342 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1342 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1343 // Convert rhs to a double in r0, r1. 1343 // Convert rhs to a double in r0, r1.
(...skipping 91 matching lines...)
1435 // Now they are equal if and only if the lhs exponent is zero in its 1435 // Now they are equal if and only if the lhs exponent is zero in its
1436 // low 31 bits. 1436 // low 31 bits.
1437 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); 1437 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
1438 __ Ret(); 1438 __ Ret();
1439 } else { 1439 } else {
1440 // Call a native function to do a comparison between two non-NaNs. 1440 // Call a native function to do a comparison between two non-NaNs.
1441 // Call C routine that may not cause GC or other trouble. 1441 // Call C routine that may not cause GC or other trouble.
1442 __ push(lr); 1442 __ push(lr);
1443 __ PrepareCallCFunction(0, 2, r5); 1443 __ PrepareCallCFunction(0, 2, r5);
1444 if (masm->use_eabi_hardfloat()) { 1444 if (masm->use_eabi_hardfloat()) {
1445 CpuFeatures::Scope scope(VFP3); 1445 CpuFeatures::Scope scope(VFP2);
1446 __ vmov(d0, r0, r1); 1446 __ vmov(d0, r0, r1);
1447 __ vmov(d1, r2, r3); 1447 __ vmov(d1, r2, r3);
1448 } 1448 }
1449 1449
1450 AllowExternalCallThatCantCauseGC scope(masm); 1450 AllowExternalCallThatCantCauseGC scope(masm);
1451 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 1451 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1452 0, 2); 1452 0, 2);
1453 __ pop(pc); // Return. 1453 __ pop(pc); // Return.
1454 } 1454 }
1455 } 1455 }
(...skipping 54 matching lines...)
1510 (lhs.is(r1) && rhs.is(r0))); 1510 (lhs.is(r1) && rhs.is(r0)));
1511 1511
1512 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); 1512 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
1513 __ b(ne, not_heap_numbers); 1513 __ b(ne, not_heap_numbers);
1514 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); 1514 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
1515 __ cmp(r2, r3); 1515 __ cmp(r2, r3);
1516 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. 1516 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
1517 1517
1518 // Both are heap numbers. Load them up then jump to the code we have 1518 // Both are heap numbers. Load them up then jump to the code we have
1519 // for that. 1519 // for that.
1520 if (CpuFeatures::IsSupported(VFP3)) { 1520 if (CpuFeatures::IsSupported(VFP2)) {
1521 CpuFeatures::Scope scope(VFP3); 1521 CpuFeatures::Scope scope(VFP2);
1522 __ sub(r7, rhs, Operand(kHeapObjectTag)); 1522 __ sub(r7, rhs, Operand(kHeapObjectTag));
1523 __ vldr(d6, r7, HeapNumber::kValueOffset); 1523 __ vldr(d6, r7, HeapNumber::kValueOffset);
1524 __ sub(r7, lhs, Operand(kHeapObjectTag)); 1524 __ sub(r7, lhs, Operand(kHeapObjectTag));
1525 __ vldr(d7, r7, HeapNumber::kValueOffset); 1525 __ vldr(d7, r7, HeapNumber::kValueOffset);
1526 } else { 1526 } else {
1527 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1527 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1528 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1528 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1529 } 1529 }
1530 __ jmp(both_loaded_as_doubles); 1530 __ jmp(both_loaded_as_doubles);
1531 } 1531 }
(...skipping 68 matching lines...)
1600 1600
1601 // Calculate the entry in the number string cache. The hash value in the 1601 // Calculate the entry in the number string cache. The hash value in the
1602 // number string cache for smis is just the smi value, and the hash for 1602 // number string cache for smis is just the smi value, and the hash for
1603 // doubles is the xor of the upper and lower words. See 1603 // doubles is the xor of the upper and lower words. See
1604 // Heap::GetNumberStringCache. 1604 // Heap::GetNumberStringCache.
1605 Isolate* isolate = masm->isolate(); 1605 Isolate* isolate = masm->isolate();
1606 Label is_smi; 1606 Label is_smi;
1607 Label load_result_from_cache; 1607 Label load_result_from_cache;
1608 if (!object_is_smi) { 1608 if (!object_is_smi) {
1609 __ JumpIfSmi(object, &is_smi); 1609 __ JumpIfSmi(object, &is_smi);
1610 if (CpuFeatures::IsSupported(VFP3)) { 1610 if (CpuFeatures::IsSupported(VFP2)) {
1611 CpuFeatures::Scope scope(VFP3); 1611 CpuFeatures::Scope scope(VFP2);
1612 __ CheckMap(object, 1612 __ CheckMap(object,
1613 scratch1, 1613 scratch1,
1614 Heap::kHeapNumberMapRootIndex, 1614 Heap::kHeapNumberMapRootIndex,
1615 not_found, 1615 not_found,
1616 DONT_DO_SMI_CHECK); 1616 DONT_DO_SMI_CHECK);
1617 1617
1618 STATIC_ASSERT(8 == kDoubleSize); 1618 STATIC_ASSERT(8 == kDoubleSize);
1619 __ add(scratch1, 1619 __ add(scratch1,
1620 object, 1620 object,
1621 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); 1621 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
(...skipping 110 matching lines...)
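Note: the cache hash described above can be stated in two lines. For a smi the hash is just the smi value, masked to the cache size; for a heap number it is the xor of the double's two 32-bit words, masked the same way. A sketch of the double case, with mask standing in for the cache length minus one (a hypothetical parameter; the real mask comes from the cache array):

    static uint32_t NumberStringCacheHash(uint32_t lo, uint32_t hi,
                                          uint32_t mask) {
      return (lo ^ hi) & mask;  // doubles: xor of lower and upper words
    }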
1732 // In cases 3 and 4 we have found out we were dealing with a number-number 1732 // In cases 3 and 4 we have found out we were dealing with a number-number
1733 // comparison. If VFP3 is supported the double values of the numbers have 1733 // comparison. If VFP2 is supported the double values of the numbers have
1734 // been loaded into d7 and d6. Otherwise, the double values have been loaded 1734 // been loaded into d7 and d6. Otherwise, the double values have been loaded
1735 // into r0, r1, r2, and r3. 1735 // into r0, r1, r2, and r3.
1736 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); 1736 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
1737 1737
1738 __ bind(&both_loaded_as_doubles); 1738 __ bind(&both_loaded_as_doubles);
1739 // The arguments have been converted to doubles and stored in d6 and d7, if 1739 // The arguments have been converted to doubles and stored in d6 and d7, if
1740 // VFP3 is supported, or in r0, r1, r2, and r3. 1740 // VFP2 is supported, or in r0, r1, r2, and r3.
1741 Isolate* isolate = masm->isolate(); 1741 Isolate* isolate = masm->isolate();
1742 if (CpuFeatures::IsSupported(VFP3)) { 1742 if (CpuFeatures::IsSupported(VFP2)) {
1743 __ bind(&lhs_not_nan); 1743 __ bind(&lhs_not_nan);
1744 CpuFeatures::Scope scope(VFP3); 1744 CpuFeatures::Scope scope(VFP2);
1745 Label no_nan; 1745 Label no_nan;
1746 // ARMv7 VFP3 instructions to implement double precision comparison. 1746 // VFP2 instructions to implement double precision comparison.
1747 __ VFPCompareAndSetFlags(d7, d6); 1747 __ VFPCompareAndSetFlags(d7, d6);
1748 Label nan; 1748 Label nan;
1749 __ b(vs, &nan); 1749 __ b(vs, &nan);
1750 __ mov(r0, Operand(EQUAL), LeaveCC, eq); 1750 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1751 __ mov(r0, Operand(LESS), LeaveCC, lt); 1751 __ mov(r0, Operand(LESS), LeaveCC, lt);
1752 __ mov(r0, Operand(GREATER), LeaveCC, gt); 1752 __ mov(r0, Operand(GREATER), LeaveCC, gt);
1753 __ Ret(); 1753 __ Ret();
1754 1754
(...skipping 98 matching lines...)
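Note: VFPCompareAndSetFlags transfers the VFP comparison flags to the APSR, so the conditional moves above decode as: eq for equal, lt for less, gt for greater, and vs (the overflow flag) for an unordered result, meaning at least one operand was NaN. That is why the NaN path is entered with b(vs, &nan).

    // After __ VFPCompareAndSetFlags(d7, d6):
    //   eq: d7 == d6        lt: d7 <  d6
    //   gt: d7 >  d6        vs: unordered (a NaN was involved)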
1853 __ InvokeBuiltin(native, JUMP_FUNCTION); 1853 __ InvokeBuiltin(native, JUMP_FUNCTION);
1854 } 1854 }
1855 1855
1856 1856
1857 // The stub expects its argument in the tos_ register and returns its result in 1857 // The stub expects its argument in the tos_ register and returns its result in
1858 // it, too: zero for false, and a non-zero value for true. 1858 // it, too: zero for false, and a non-zero value for true.
1859 void ToBooleanStub::Generate(MacroAssembler* masm) { 1859 void ToBooleanStub::Generate(MacroAssembler* masm) {
1860 // This stub overrides SometimesSetsUpAFrame() to return false. That means 1860 // This stub overrides SometimesSetsUpAFrame() to return false. That means
1861 // we cannot call anything that could cause a GC from this stub. 1861 // we cannot call anything that could cause a GC from this stub.
1862 // This stub uses VFP3 instructions. 1862 // This stub uses VFP2 instructions.
1863 CpuFeatures::Scope scope(VFP3); 1863 CpuFeatures::Scope scope(VFP2);
1864 1864
1865 Label patch; 1865 Label patch;
1866 const Register map = r9.is(tos_) ? r7 : r9; 1866 const Register map = r9.is(tos_) ? r7 : r9;
1867 1867
1868 // undefined -> false. 1868 // undefined -> false.
1869 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); 1869 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1870 1870
1871 // Boolean -> its value. 1871 // Boolean -> its value.
1872 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); 1872 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1873 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); 1873 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
(...skipping 91 matching lines...)
1965 1); 1965 1);
1966 } 1966 }
1967 1967
1968 1968
1969 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { 1969 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1970 // We don't allow a GC during a store buffer overflow so there is no need to 1970 // We don't allow a GC during a store buffer overflow so there is no need to
1971 // store the registers in any particular way, but we do have to store and 1971 // store the registers in any particular way, but we do have to store and
1972 // restore them. 1972 // restore them.
1973 __ stm(db_w, sp, kCallerSaved | lr.bit()); 1973 __ stm(db_w, sp, kCallerSaved | lr.bit());
1974 if (save_doubles_ == kSaveFPRegs) { 1974 if (save_doubles_ == kSaveFPRegs) {
1975 CpuFeatures::Scope scope(VFP3); 1975 CpuFeatures::Scope scope(VFP2);
1976 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); 1976 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1977 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 1977 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1978 DwVfpRegister reg = DwVfpRegister::from_code(i); 1978 DwVfpRegister reg = DwVfpRegister::from_code(i);
1979 __ vstr(reg, MemOperand(sp, i * kDoubleSize)); 1979 __ vstr(reg, MemOperand(sp, i * kDoubleSize));
1980 } 1980 }
1981 } 1981 }
1982 const int argument_count = 1; 1982 const int argument_count = 1;
1983 const int fp_argument_count = 0; 1983 const int fp_argument_count = 0;
1984 const Register scratch = r1; 1984 const Register scratch = r1;
1985 1985
1986 AllowExternalCallThatCantCauseGC scope(masm); 1986 AllowExternalCallThatCantCauseGC scope(masm);
1987 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); 1987 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1988 __ mov(r0, Operand(ExternalReference::isolate_address())); 1988 __ mov(r0, Operand(ExternalReference::isolate_address()));
1989 __ CallCFunction( 1989 __ CallCFunction(
1990 ExternalReference::store_buffer_overflow_function(masm->isolate()), 1990 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1991 argument_count); 1991 argument_count);
1992 if (save_doubles_ == kSaveFPRegs) { 1992 if (save_doubles_ == kSaveFPRegs) {
1993 CpuFeatures::Scope scope(VFP3); 1993 CpuFeatures::Scope scope(VFP2);
1994 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 1994 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1995 DwVfpRegister reg = DwVfpRegister::from_code(i); 1995 DwVfpRegister reg = DwVfpRegister::from_code(i);
1996 __ vldr(reg, MemOperand(sp, i * kDoubleSize)); 1996 __ vldr(reg, MemOperand(sp, i * kDoubleSize));
1997 } 1997 }
1998 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); 1998 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1999 } 1999 }
2000 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). 2000 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
2001 } 2001 }
2002 2002
2003 2003
(...skipping 209 matching lines...)
2213 // Convert the heap number in r0 to an untagged integer in r1. 2213 // Convert the heap number in r0 to an untagged integer in r1.
2214 // This can't go slow-case because it's the same number we already 2214 // This can't go slow-case because it's the same number we already
2215 // converted once again. 2215 // converted once again.
2216 __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible); 2216 __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
2217 __ mvn(r1, Operand(r1)); 2217 __ mvn(r1, Operand(r1));
2218 2218
2219 __ bind(&heapnumber_allocated); 2219 __ bind(&heapnumber_allocated);
2220 __ mov(r0, r2); // Move newly allocated heap number to r0. 2220 __ mov(r0, r2); // Move newly allocated heap number to r0.
2221 } 2221 }
2222 2222
2223 if (CpuFeatures::IsSupported(VFP3)) { 2223 if (CpuFeatures::IsSupported(VFP2)) {
2224 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. 2224 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
2225 CpuFeatures::Scope scope(VFP3); 2225 CpuFeatures::Scope scope(VFP2);
2226 __ vmov(s0, r1); 2226 __ vmov(s0, r1);
2227 __ vcvt_f64_s32(d0, s0); 2227 __ vcvt_f64_s32(d0, s0);
2228 __ sub(r2, r0, Operand(kHeapObjectTag)); 2228 __ sub(r2, r0, Operand(kHeapObjectTag));
2229 __ vstr(d0, r2, HeapNumber::kValueOffset); 2229 __ vstr(d0, r2, HeapNumber::kValueOffset);
2230 __ Ret(); 2230 __ Ret();
2231 } else { 2231 } else {
2232 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not 2232 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2233 // have to set up a frame. 2233 // have to set up a frame.
2234 WriteInt32ToHeapNumberStub stub(r1, r0, r2); 2234 WriteInt32ToHeapNumberStub stub(r1, r0, r2);
2235 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); 2235 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
(...skipping 279 matching lines...)
2515 2515
2516 switch (op_) { 2516 switch (op_) {
2517 case Token::ADD: 2517 case Token::ADD:
2518 case Token::SUB: 2518 case Token::SUB:
2519 case Token::MUL: 2519 case Token::MUL:
2520 case Token::DIV: 2520 case Token::DIV:
2521 case Token::MOD: { 2521 case Token::MOD: {
2522 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 2522 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2523 // depending on whether VFP3 is available or not. 2523 // depending on whether VFP2 is available or not.
2524 FloatingPointHelper::Destination destination = 2524 FloatingPointHelper::Destination destination =
2525 CpuFeatures::IsSupported(VFP3) && 2525 CpuFeatures::IsSupported(VFP2) &&
2526 op_ != Token::MOD ? 2526 op_ != Token::MOD ?
2527 FloatingPointHelper::kVFPRegisters : 2527 FloatingPointHelper::kVFPRegisters :
2528 FloatingPointHelper::kCoreRegisters; 2528 FloatingPointHelper::kCoreRegisters;
2529 2529
2530 // Allocate new heap number for result. 2530 // Allocate new heap number for result.
2531 Register result = r5; 2531 Register result = r5;
2532 GenerateHeapResultAllocation( 2532 GenerateHeapResultAllocation(
2533 masm, result, heap_number_map, scratch1, scratch2, gc_required); 2533 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2534 2534
2535 // Load the operands. 2535 // Load the operands.
2536 if (smi_operands) { 2536 if (smi_operands) {
2537 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); 2537 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2538 } else { 2538 } else {
2539 FloatingPointHelper::LoadOperands(masm, 2539 FloatingPointHelper::LoadOperands(masm,
2540 destination, 2540 destination,
2541 heap_number_map, 2541 heap_number_map,
2542 scratch1, 2542 scratch1,
2543 scratch2, 2543 scratch2,
2544 not_numbers); 2544 not_numbers);
2545 } 2545 }
2546 2546
2547 // Calculate the result. 2547 // Calculate the result.
2548 if (destination == FloatingPointHelper::kVFPRegisters) { 2548 if (destination == FloatingPointHelper::kVFPRegisters) {
2549 // Using VFP registers: 2549 // Using VFP registers:
2550 // d6: Left value 2550 // d6: Left value
2551 // d7: Right value 2551 // d7: Right value
2552 CpuFeatures::Scope scope(VFP3); 2552 CpuFeatures::Scope scope(VFP2);
2553 switch (op_) { 2553 switch (op_) {
2554 case Token::ADD: 2554 case Token::ADD:
2555 __ vadd(d5, d6, d7); 2555 __ vadd(d5, d6, d7);
2556 break; 2556 break;
2557 case Token::SUB: 2557 case Token::SUB:
2558 __ vsub(d5, d6, d7); 2558 __ vsub(d5, d6, d7);
2559 break; 2559 break;
2560 case Token::MUL: 2560 case Token::MUL:
2561 __ vmul(d5, d6, d7); 2561 __ vmul(d5, d6, d7);
2562 break; 2562 break;
(...skipping 68 matching lines...)
2631 __ mov(r2, Operand(r3, ASR, r2)); 2631 __ mov(r2, Operand(r3, ASR, r2));
2632 break; 2632 break;
2633 case Token::SHR: 2633 case Token::SHR:
2634 // Use only the 5 least significant bits of the shift count. 2634 // Use only the 5 least significant bits of the shift count.
2635 __ GetLeastBitsFromInt32(r2, r2, 5); 2635 __ GetLeastBitsFromInt32(r2, r2, 5);
2636 __ mov(r2, Operand(r3, LSR, r2), SetCC); 2636 __ mov(r2, Operand(r3, LSR, r2), SetCC);
2637 // SHR is special because it is required to produce a positive answer. 2637 // SHR is special because it is required to produce a positive answer.
2638 // The code below for writing into heap numbers isn't capable of 2638 // The code below for writing into heap numbers isn't capable of
2639 // writing the register as an unsigned int so we go to slow case if we 2639 // writing the register as an unsigned int so we go to slow case if we
2640 // hit this case. 2640 // hit this case.
2641 if (CpuFeatures::IsSupported(VFP3)) { 2641 if (CpuFeatures::IsSupported(VFP2)) {
2642 __ b(mi, &result_not_a_smi); 2642 __ b(mi, &result_not_a_smi);
2643 } else { 2643 } else {
2644 __ b(mi, not_numbers); 2644 __ b(mi, not_numbers);
2645 } 2645 }
2646 break; 2646 break;
2647 case Token::SHL: 2647 case Token::SHL:
2648 // Use only the 5 least significant bits of the shift count. 2648 // Use only the 5 least significant bits of the shift count.
2649 __ GetLeastBitsFromInt32(r2, r2, 5); 2649 __ GetLeastBitsFromInt32(r2, r2, 5);
2650 __ mov(r2, Operand(r3, LSL, r2)); 2650 __ mov(r2, Operand(r3, LSL, r2));
2651 break; 2651 break;
(...skipping 18 matching lines...)
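Note: the SHR corner case above is worth a concrete value. JavaScript's >>> must produce an unsigned result, and a zero shift of a value with the top bit set yields a number of 2^31 or more, which fits neither a smi nor a signed int32; the VFP2 path can still box such a value via vcvt_f64_u32, while the non-VFP path cannot and goes slow. A standalone illustration (not V8 code):

    #include <stdint.h>

    static uint32_t ShrZero(int32_t x) {
      return static_cast<uint32_t>(x) >> 0;  // (-1) >>> 0 == 4294967295
    }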
2670 masm, result, heap_number_map, scratch1, scratch2, gc_required); 2670 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2671 } 2671 }
2672 2672
2673 // r2: Answer as signed int32. 2673 // r2: Answer as signed int32.
2674 // r5: Heap number to write answer into. 2674 // r5: Heap number to write answer into.
2675 2675
2676 // Nothing can go wrong now, so move the heap number to r0, which is the 2676 // Nothing can go wrong now, so move the heap number to r0, which is the
2677 // result. 2677 // result.
2678 __ mov(r0, Operand(r5)); 2678 __ mov(r0, Operand(r5));
2679 2679
2680 if (CpuFeatures::IsSupported(VFP3)) { 2680 if (CpuFeatures::IsSupported(VFP2)) {
2681 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As 2681 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
2682 // mentioned above SHR needs to always produce a positive result. 2682 // mentioned above SHR needs to always produce a positive result.
2683 CpuFeatures::Scope scope(VFP3); 2683 CpuFeatures::Scope scope(VFP2);
2684 __ vmov(s0, r2); 2684 __ vmov(s0, r2);
2685 if (op_ == Token::SHR) { 2685 if (op_ == Token::SHR) {
2686 __ vcvt_f64_u32(d0, s0); 2686 __ vcvt_f64_u32(d0, s0);
2687 } else { 2687 } else {
2688 __ vcvt_f64_s32(d0, s0); 2688 __ vcvt_f64_s32(d0, s0);
2689 } 2689 }
2690 __ sub(r3, r0, Operand(kHeapObjectTag)); 2690 __ sub(r3, r0, Operand(kHeapObjectTag));
2691 __ vstr(d0, r3, HeapNumber::kValueOffset); 2691 __ vstr(d0, r3, HeapNumber::kValueOffset);
2692 __ Ret(); 2692 __ Ret();
2693 } else { 2693 } else {
(...skipping 138 matching lines...)
2832 switch (op_) { 2832 switch (op_) {
2833 case Token::ADD: 2833 case Token::ADD:
2834 case Token::SUB: 2834 case Token::SUB:
2835 case Token::MUL: 2835 case Token::MUL:
2836 case Token::DIV: 2836 case Token::DIV:
2837 case Token::MOD: { 2837 case Token::MOD: {
2838 // Load both operands and check that they are 32-bit integer. 2838 // Load both operands and check that they are 32-bit integer.
2839 // Jump to type transition if they are not. The registers r0 and r1 (right 2839 // Jump to type transition if they are not. The registers r0 and r1 (right
2840 // and left) are preserved for the runtime call. 2840 // and left) are preserved for the runtime call.
2841 FloatingPointHelper::Destination destination = 2841 FloatingPointHelper::Destination destination =
2842 (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD) 2842 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
2843 ? FloatingPointHelper::kVFPRegisters 2843 ? FloatingPointHelper::kVFPRegisters
2844 : FloatingPointHelper::kCoreRegisters; 2844 : FloatingPointHelper::kCoreRegisters;
2845 2845
2846 FloatingPointHelper::LoadNumberAsInt32Double(masm, 2846 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2847 right, 2847 right,
2848 destination, 2848 destination,
2849 d7, 2849 d7,
2850 r2, 2850 r2,
2851 r3, 2851 r3,
2852 heap_number_map, 2852 heap_number_map,
2853 scratch1, 2853 scratch1,
2854 scratch2, 2854 scratch2,
2855 s0, 2855 s0,
2856 &transition); 2856 &transition);
2857 FloatingPointHelper::LoadNumberAsInt32Double(masm, 2857 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2858 left, 2858 left,
2859 destination, 2859 destination,
2860 d6, 2860 d6,
2861 r4, 2861 r4,
2862 r5, 2862 r5,
2863 heap_number_map, 2863 heap_number_map,
2864 scratch1, 2864 scratch1,
2865 scratch2, 2865 scratch2,
2866 s0, 2866 s0,
2867 &transition); 2867 &transition);
2868 2868
2869 if (destination == FloatingPointHelper::kVFPRegisters) { 2869 if (destination == FloatingPointHelper::kVFPRegisters) {
2870 CpuFeatures::Scope scope(VFP3); 2870 CpuFeatures::Scope scope(VFP2);
2871 Label return_heap_number; 2871 Label return_heap_number;
2872 switch (op_) { 2872 switch (op_) {
2873 case Token::ADD: 2873 case Token::ADD:
2874 __ vadd(d5, d6, d7); 2874 __ vadd(d5, d6, d7);
2875 break; 2875 break;
2876 case Token::SUB: 2876 case Token::SUB:
2877 __ vsub(d5, d6, d7); 2877 __ vsub(d5, d6, d7);
2878 break; 2878 break;
2879 case Token::MUL: 2879 case Token::MUL:
2880 __ vmul(d5, d6, d7); 2880 __ vmul(d5, d6, d7);
(...skipping 146 matching lines...)
3027 __ and_(r2, r2, Operand(0x1f)); 3027 __ and_(r2, r2, Operand(0x1f));
3028 __ mov(r2, Operand(r3, ASR, r2)); 3028 __ mov(r2, Operand(r3, ASR, r2));
3029 break; 3029 break;
3030 case Token::SHR: 3030 case Token::SHR:
3031 __ and_(r2, r2, Operand(0x1f)); 3031 __ and_(r2, r2, Operand(0x1f));
3032 __ mov(r2, Operand(r3, LSR, r2), SetCC); 3032 __ mov(r2, Operand(r3, LSR, r2), SetCC);
3033 // SHR is special because it is required to produce a positive answer. 3033 // SHR is special because it is required to produce a positive answer.
3034 // We only get a negative result if the shift value (r2) is 0. 3034 // We only get a negative result if the shift value (r2) is 0.
3035 // This result cannot be represented as a signed 32-bit integer, try 3035 // This result cannot be represented as a signed 32-bit integer, try
3036 // to return a heap number if we can. 3036 // to return a heap number if we can.
3037 // The non vfp3 code does not support this special case, so jump to 3037 // The non vfp2 code does not support this special case, so jump to
3038 // runtime if we don't support it. 3038 // runtime if we don't support it.
3039 if (CpuFeatures::IsSupported(VFP3)) { 3039 if (CpuFeatures::IsSupported(VFP2)) {
3040 __ b(mi, (result_type_ <= BinaryOpIC::INT32) 3040 __ b(mi, (result_type_ <= BinaryOpIC::INT32)
3041 ? &transition 3041 ? &transition
3042 : &return_heap_number); 3042 : &return_heap_number);
3043 } else { 3043 } else {
3044 __ b(mi, (result_type_ <= BinaryOpIC::INT32) 3044 __ b(mi, (result_type_ <= BinaryOpIC::INT32)
3045 ? &transition 3045 ? &transition
3046 : &call_runtime); 3046 : &call_runtime);
3047 } 3047 }
3048 break; 3048 break;
3049 case Token::SHL: 3049 case Token::SHL:
(...skipping 14 matching lines...)
3064 3064
3065 __ bind(&return_heap_number); 3065 __ bind(&return_heap_number);
3066 heap_number_result = r5; 3066 heap_number_result = r5;
3067 GenerateHeapResultAllocation(masm, 3067 GenerateHeapResultAllocation(masm,
3068 heap_number_result, 3068 heap_number_result,
3069 heap_number_map, 3069 heap_number_map,
3070 scratch1, 3070 scratch1,
3071 scratch2, 3071 scratch2,
3072 &call_runtime); 3072 &call_runtime);
3073 3073
3074 if (CpuFeatures::IsSupported(VFP3)) { 3074 if (CpuFeatures::IsSupported(VFP2)) {
3075 CpuFeatures::Scope scope(VFP3); 3075 CpuFeatures::Scope scope(VFP2);
3076 if (op_ != Token::SHR) { 3076 if (op_ != Token::SHR) {
3077 // Convert the result to a floating point value. 3077 // Convert the result to a floating point value.
3078 __ vmov(double_scratch.low(), r2); 3078 __ vmov(double_scratch.low(), r2);
3079 __ vcvt_f64_s32(double_scratch, double_scratch.low()); 3079 __ vcvt_f64_s32(double_scratch, double_scratch.low());
3080 } else { 3080 } else {
3081 // The result must be interpreted as an unsigned 32-bit integer. 3081 // The result must be interpreted as an unsigned 32-bit integer.
3082 __ vmov(double_scratch.low(), r2); 3082 __ vmov(double_scratch.low(), r2);
3083 __ vcvt_f64_u32(double_scratch, double_scratch.low()); 3083 __ vcvt_f64_u32(double_scratch, double_scratch.low());
3084 } 3084 }
3085 3085
(...skipping 208 matching lines...)
3294 3294
3295 Label input_not_smi; 3295 Label input_not_smi;
3296 Label loaded; 3296 Label loaded;
3297 Label calculate; 3297 Label calculate;
3298 Label invalid_cache; 3298 Label invalid_cache;
3299 const Register scratch0 = r9; 3299 const Register scratch0 = r9;
3300 const Register scratch1 = r7; 3300 const Register scratch1 = r7;
3301 const Register cache_entry = r0; 3301 const Register cache_entry = r0;
3302 const bool tagged = (argument_type_ == TAGGED); 3302 const bool tagged = (argument_type_ == TAGGED);
3303 3303
3304 if (CpuFeatures::IsSupported(VFP3)) { 3304 if (CpuFeatures::IsSupported(VFP2)) {
3305 CpuFeatures::Scope scope(VFP3); 3305 CpuFeatures::Scope scope(VFP2);
3306 if (tagged) { 3306 if (tagged) {
3307 // Argument is a number and is on stack and in r0. 3307 // Argument is a number and is on stack and in r0.
3308 // Load argument and check if it is a smi. 3308 // Load argument and check if it is a smi.
3309 __ JumpIfNotSmi(r0, &input_not_smi); 3309 __ JumpIfNotSmi(r0, &input_not_smi);
3310 3310
3311 // Input is a smi. Convert to double and load the low and high words 3311 // Input is a smi. Convert to double and load the low and high words
3312 // of the double into r2, r3. 3312 // of the double into r2, r3.
3313 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); 3313 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3314 __ b(&loaded); 3314 __ b(&loaded);
3315 3315
(...skipping 80 matching lines...)
3396 __ bind(&calculate); 3396 __ bind(&calculate);
3397 Counters* counters = masm->isolate()->counters(); 3397 Counters* counters = masm->isolate()->counters();
3398 __ IncrementCounter( 3398 __ IncrementCounter(
3399 counters->transcendental_cache_miss(), 1, scratch0, scratch1); 3399 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3400 if (tagged) { 3400 if (tagged) {
3401 __ bind(&invalid_cache); 3401 __ bind(&invalid_cache);
3402 ExternalReference runtime_function = 3402 ExternalReference runtime_function =
3403 ExternalReference(RuntimeFunction(), masm->isolate()); 3403 ExternalReference(RuntimeFunction(), masm->isolate());
3404 __ TailCallExternalReference(runtime_function, 1, 1); 3404 __ TailCallExternalReference(runtime_function, 1, 1);
3405 } else { 3405 } else {
3406 ASSERT(CpuFeatures::IsSupported(VFP3)); 3406 ASSERT(CpuFeatures::IsSupported(VFP2));
3407 CpuFeatures::Scope scope(VFP3); 3407 CpuFeatures::Scope scope(VFP2);
3408 3408
3409 Label no_update; 3409 Label no_update;
3410 Label skip_cache; 3410 Label skip_cache;
3411 3411
3412 // Call C function to calculate the result and update the cache. 3412 // Call C function to calculate the result and update the cache.
3413 // r0: precalculated cache entry address. 3413 // r0: precalculated cache entry address.
3414 // r2 and r3: parts of the double value. 3414 // r2 and r3: parts of the double value.
3415 // Store r0, r2 and r3 on stack for later before calling C function. 3415 // Store r0, r2 and r3 on stack for later before calling C function.
3416 __ Push(r3, r2, cache_entry); 3416 __ Push(r3, r2, cache_entry);
3417 GenerateCallCFunction(masm, scratch0); 3417 GenerateCallCFunction(masm, scratch0);
(...skipping 40 matching lines...)
3458 __ push(scratch0); 3458 __ push(scratch0);
3459 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 3459 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3460 } 3460 }
3461 __ Ret(); 3461 __ Ret();
3462 } 3462 }
3463 } 3463 }
3464 3464
3465 3465
3466 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, 3466 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3467 Register scratch) { 3467 Register scratch) {
3468 ASSERT(CpuFeatures::IsEnabled(VFP2));
3468 Isolate* isolate = masm->isolate(); 3469 Isolate* isolate = masm->isolate();
3469 3470
3470 __ push(lr); 3471 __ push(lr);
3471 __ PrepareCallCFunction(0, 1, scratch); 3472 __ PrepareCallCFunction(0, 1, scratch);
3472 if (masm->use_eabi_hardfloat()) { 3473 if (masm->use_eabi_hardfloat()) {
3473 __ vmov(d0, d2); 3474 __ vmov(d0, d2);
3474 } else { 3475 } else {
3475 __ vmov(r0, r1, d2); 3476 __ vmov(r0, r1, d2);
3476 } 3477 }
3477 AllowExternalCallThatCantCauseGC scope(masm); 3478 AllowExternalCallThatCantCauseGC scope(masm);
(...skipping 40 matching lines...)
3518 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); 3519 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3519 } 3520 }
3520 3521
3521 3522
3522 void InterruptStub::Generate(MacroAssembler* masm) { 3523 void InterruptStub::Generate(MacroAssembler* masm) {
3523 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); 3524 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3524 } 3525 }
3525 3526
3526 3527
3527 void MathPowStub::Generate(MacroAssembler* masm) { 3528 void MathPowStub::Generate(MacroAssembler* masm) {
3528 CpuFeatures::Scope vfp3_scope(VFP3); 3529 CpuFeatures::Scope vfp2_scope(VFP2);
3529 const Register base = r1; 3530 const Register base = r1;
3530 const Register exponent = r2; 3531 const Register exponent = r2;
3531 const Register heapnumbermap = r5; 3532 const Register heapnumbermap = r5;
3532 const Register heapnumber = r0; 3533 const Register heapnumber = r0;
3533 const DoubleRegister double_base = d1; 3534 const DoubleRegister double_base = d1;
3534 const DoubleRegister double_exponent = d2; 3535 const DoubleRegister double_exponent = d2;
3535 const DoubleRegister double_result = d3; 3536 const DoubleRegister double_result = d3;
3536 const DoubleRegister double_scratch = d0; 3537 const DoubleRegister double_scratch = d0;
3537 const SwVfpRegister single_scratch = s0; 3538 const SwVfpRegister single_scratch = s0;
3538 const Register scratch = r9; 3539 const Register scratch = r9;
(...skipping 78 matching lines...)
3617 3618
3618 // Calculates square root of base. Check for the special case of 3619 // Calculates square root of base. Check for the special case of
3619 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). 3620 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3620 __ vmov(double_scratch, -V8_INFINITY); 3621 __ vmov(double_scratch, -V8_INFINITY);
3621 __ VFPCompareAndSetFlags(double_base, double_scratch); 3622 __ VFPCompareAndSetFlags(double_base, double_scratch);
3622 __ vmov(double_result, kDoubleRegZero, eq); 3623 __ vmov(double_result, kDoubleRegZero, eq);
3623 __ b(eq, &done); 3624 __ b(eq, &done);
3624 3625
3625 // Add +0 to convert -0 to +0. 3626 // Add +0 to convert -0 to +0.
3626 __ vadd(double_scratch, double_base, kDoubleRegZero); 3627 __ vadd(double_scratch, double_base, kDoubleRegZero);
3627 __ vmov(double_result, 1); 3628 __ vmov(double_result, 1.0);
3628 __ vsqrt(double_scratch, double_scratch); 3629 __ vsqrt(double_scratch, double_scratch);
3629 __ vdiv(double_result, double_result, double_scratch); 3630 __ vdiv(double_result, double_result, double_scratch);
3630 __ jmp(&done); 3631 __ jmp(&done);
3631 } 3632 }
3632 3633
3633 __ push(lr); 3634 __ push(lr);
3634 { 3635 {
3635 AllowExternalCallThatCantCauseGC scope(masm); 3636 AllowExternalCallThatCantCauseGC scope(masm);
3636 __ PrepareCallCFunction(0, 2, scratch); 3637 __ PrepareCallCFunction(0, 2, scratch);
3637 __ SetCallCDoubleArguments(double_base, double_exponent); 3638 __ SetCallCDoubleArguments(double_base, double_exponent);
(...skipping 336 matching lines...)
3974 // r3: argc 3975 // r3: argc
3975 // [sp+0]: argv 3976 // [sp+0]: argv
3976 3977
3977 Label invoke, handler_entry, exit; 3978 Label invoke, handler_entry, exit;
3978 3979
3979 // Called from C, so do not pop argc and args on exit (preserve sp) 3980 // Called from C, so do not pop argc and args on exit (preserve sp)
3980 // No need to save register-passed args 3981 // No need to save register-passed args
3981 // Save callee-saved registers (incl. cp and fp), sp, and lr 3982 // Save callee-saved registers (incl. cp and fp), sp, and lr
3982 __ stm(db_w, sp, kCalleeSaved | lr.bit()); 3983 __ stm(db_w, sp, kCalleeSaved | lr.bit());
3983 3984
3984 if (CpuFeatures::IsSupported(VFP3)) { 3985 if (CpuFeatures::IsSupported(VFP2)) {
3985 CpuFeatures::Scope scope(VFP3); 3986 CpuFeatures::Scope scope(VFP2);
3986 // Save callee-saved vfp registers. 3987 // Save callee-saved vfp registers.
3987 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); 3988 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
3988 // Set up the reserved register for 0.0. 3989 // Set up the reserved register for 0.0.
3989 __ vmov(kDoubleRegZero, 0.0); 3990 __ Vmov(kDoubleRegZero, 0.0);
Rodolph Perfetta 2012/07/25 10:59:54 The Vmov macro will use kDoubleRegZero if one wont
3990 } 3991 }
3991 3992
3992 // Get address of argv, see stm above. 3993 // Get address of argv, see stm above.
3993 // r0: code entry 3994 // r0: code entry
3994 // r1: function 3995 // r1: function
3995 // r2: receiver 3996 // r2: receiver
3996 // r3: argc 3997 // r3: argc
3997 3998
3998 // Set up argv in r4. 3999 // Set up argv in r4.
3999 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; 4000 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4000 if (CpuFeatures::IsSupported(VFP3)) { 4001 if (CpuFeatures::IsSupported(VFP2)) {
4001 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; 4002 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
4002 } 4003 }
4003 __ ldr(r4, MemOperand(sp, offset_to_argv)); 4004 __ ldr(r4, MemOperand(sp, offset_to_argv));
4004 4005
4005 // Push a frame with special values setup to mark it as an entry frame. 4006 // Push a frame with special values setup to mark it as an entry frame.
4006 // r0: code entry 4007 // r0: code entry
4007 // r1: function 4008 // r1: function
4008 // r2: receiver 4009 // r2: receiver
4009 // r3: argc 4010 // r3: argc
4010 // r4: argv 4011 // r4: argv
(...skipping 117 matching lines...)
4128 // Reset the stack to the callee saved registers. 4129 // Reset the stack to the callee saved registers.
4129 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 4130 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4130 4131
4131 // Restore callee-saved registers and return. 4132 // Restore callee-saved registers and return.
4132 #ifdef DEBUG 4133 #ifdef DEBUG
4133 if (FLAG_debug_code) { 4134 if (FLAG_debug_code) {
4134 __ mov(lr, Operand(pc)); 4135 __ mov(lr, Operand(pc));
4135 } 4136 }
4136 #endif 4137 #endif
4137 4138
4138 if (CpuFeatures::IsSupported(VFP3)) { 4139 if (CpuFeatures::IsSupported(VFP2)) {
4139 CpuFeatures::Scope scope(VFP3); 4140 CpuFeatures::Scope scope(VFP2);
4140 // Restore callee-saved vfp registers. 4141 // Restore callee-saved vfp registers.
4141 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); 4142 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
4142 } 4143 }
4143 4144
4144 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); 4145 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
4145 } 4146 }
4146 4147
4147 4148
4148 // Uses registers r0 to r4. 4149 // Uses registers r0 to r4.
4149 // Expected input (depending on whether args are in registers or on the stack): 4150 // Expected input (depending on whether args are in registers or on the stack):
(...skipping 2506 matching lines...)
6656 __ and_(r2, r1, Operand(r0)); 6657 __ and_(r2, r1, Operand(r0));
6657 __ JumpIfSmi(r2, &generic_stub); 6658 __ JumpIfSmi(r2, &generic_stub);
6658 6659
6659 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); 6660 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
6660 __ b(ne, &maybe_undefined1); 6661 __ b(ne, &maybe_undefined1);
6661 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); 6662 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6662 __ b(ne, &maybe_undefined2); 6663 __ b(ne, &maybe_undefined2);
6663 6664
6664 // Inlining the double comparison and falling back to the general compare 6665 // Inlining the double comparison and falling back to the general compare
6665 // stub if NaN is involved or VFP3 is unsupported. 6666 // stub if NaN is involved or VFP2 is unsupported.
6666 if (CpuFeatures::IsSupported(VFP3)) { 6667 if (CpuFeatures::IsSupported(VFP2)) {
6667 CpuFeatures::Scope scope(VFP3); 6668 CpuFeatures::Scope scope(VFP2);
6668 6669
6669 // Load left and right operand 6670 // Load left and right operand
6670 __ sub(r2, r1, Operand(kHeapObjectTag)); 6671 __ sub(r2, r1, Operand(kHeapObjectTag));
6671 __ vldr(d0, r2, HeapNumber::kValueOffset); 6672 __ vldr(d0, r2, HeapNumber::kValueOffset);
6672 __ sub(r2, r0, Operand(kHeapObjectTag)); 6673 __ sub(r2, r0, Operand(kHeapObjectTag));
6673 __ vldr(d1, r2, HeapNumber::kValueOffset); 6674 __ vldr(d1, r2, HeapNumber::kValueOffset);
6674 6675
6675 // Compare operands 6676 // Compare operands
6676 __ VFPCompareAndSetFlags(d0, d1); 6677 __ VFPCompareAndSetFlags(d0, d1);
6677 6678
(...skipping 892 matching lines...)
7570 7571
7571 __ Pop(lr, r5, r1); 7572 __ Pop(lr, r5, r1);
7572 __ Ret(); 7573 __ Ret();
7573 } 7574 }
7574 7575
7575 #undef __ 7576 #undef __
7576 7577
7577 } } // namespace v8::internal 7578 } } // namespace v8::internal
7578 7579
7579 #endif // V8_TARGET_ARCH_ARM 7580 #endif // V8_TARGET_ARCH_ARM