Chromium Code Reviews

Side by Side Diff: src/mips/code-stubs-mips.cc

Issue 12391055: Cleaned up CpuFeature scope handling. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fixed nits (created 7 years, 9 months ago)
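For readers skimming the diff below: the mechanical change throughout this file swaps the old global-state guard CpuFeatures::Scope scope(FPU) for the new CpuFeatureScope scope(masm, FPU), which is bound to the assembler it guards. A minimal compilable sketch of the pattern, using simplified stand-in types (the real classes live in V8's src/assembler.h; everything below is illustrative, not the actual V8 API):

#include <cassert>

enum CpuFeature { FPU };

// Stand-in for the platform assembler; the real one tracks which features
// the code it generates is allowed to use.
struct MacroAssembler {
  bool fpu_enabled = false;
};

// The RAII guard this CL introduces: unlike the old CpuFeatures::Scope(FPU),
// it names the assembler it applies to, so feature state is per-masm rather
// than global.
class CpuFeatureScope {
 public:
  CpuFeatureScope(MacroAssembler* masm, CpuFeature feature)
      : masm_(masm), old_state_(masm->fpu_enabled) {
    assert(feature == FPU);  // this sketch only models FPU
    masm_->fpu_enabled = true;
  }
  ~CpuFeatureScope() { masm_->fpu_enabled = old_state_; }  // restore on exit

 private:
  MacroAssembler* masm_;
  bool old_state_;
};

void EmitFpuSequence(MacroAssembler* masm) {
  // Old: CpuFeatures::Scope scope(FPU);
  CpuFeatureScope scope(masm, FPU);
  // ... emit FPU instructions while the scope is alive ...
}

int main() {
  MacroAssembler masm;
  EmitFpuSequence(&masm);
  return 0;
}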
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 596 matching lines...)
607 __ Ret(USE_DELAY_SLOT); 607 __ Ret(USE_DELAY_SLOT);
608 __ or_(exponent, exponent, source_); 608 __ or_(exponent, exponent, source_);
609 } 609 }
610 610
611 611
612 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, 612 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
613 FloatingPointHelper::Destination destination, 613 FloatingPointHelper::Destination destination,
614 Register scratch1, 614 Register scratch1,
615 Register scratch2) { 615 Register scratch2) {
616 if (CpuFeatures::IsSupported(FPU)) { 616 if (CpuFeatures::IsSupported(FPU)) {
617 CpuFeatures::Scope scope(FPU); 617 CpuFeatureScope scope(masm, FPU);
618 __ sra(scratch1, a0, kSmiTagSize); 618 __ sra(scratch1, a0, kSmiTagSize);
619 __ mtc1(scratch1, f14); 619 __ mtc1(scratch1, f14);
620 __ cvt_d_w(f14, f14); 620 __ cvt_d_w(f14, f14);
621 __ sra(scratch1, a1, kSmiTagSize); 621 __ sra(scratch1, a1, kSmiTagSize);
622 __ mtc1(scratch1, f12); 622 __ mtc1(scratch1, f12);
623 __ cvt_d_w(f12, f12); 623 __ cvt_d_w(f12, f12);
624 if (destination == kCoreRegisters) { 624 if (destination == kCoreRegisters) {
625 __ Move(a2, a3, f14); 625 __ Move(a2, a3, f14);
626 __ Move(a0, a1, f12); 626 __ Move(a0, a1, f12);
627 } 627 }
(...skipping 30 matching lines...)
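The LoadSmis chunk above converts two tagged smis to doubles: sra untags (a smi stores its integer shifted left by the one-bit tag), mtc1 moves the word into an FPU register, and cvt.d.w converts it to a double. A hedged C++ equivalent of the untag-and-convert step (kSmiTagSize = 1 matches V8's 32-bit smi layout; this is an illustration, not V8 code):

#include <cstdint>

const int kSmiTagSize = 1;  // low bit is the tag; 0 means smi

// Equivalent of: __ sra(scratch, reg, kSmiTagSize); __ mtc1(...); __ cvt_d_w(...)
double SmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> kSmiTagSize;  // arithmetic shift, like sra
  return static_cast<double>(untagged);          // like mtc1 + cvt.d.w
}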
658 Label is_smi, done; 658 Label is_smi, done;
659 659
660 // Smi-check 660 // Smi-check
661 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); 661 __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
662 // Heap number check 662 // Heap number check
663 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); 663 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
664 664
665 // Handle loading a double from a heap number. 665 // Handle loading a double from a heap number.
666 if (CpuFeatures::IsSupported(FPU) && 666 if (CpuFeatures::IsSupported(FPU) &&
667 destination == kFPURegisters) { 667 destination == kFPURegisters) {
668 CpuFeatures::Scope scope(FPU); 668 CpuFeatureScope scope(masm, FPU);
669 // Load the double from tagged HeapNumber to double register. 669 // Load the double from tagged HeapNumber to double register.
670 670
671 // ARM uses a workaround here because of the unaligned HeapNumber 671 // ARM uses a workaround here because of the unaligned HeapNumber
672 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no 672 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
673 // point in generating even more instructions. 673 // point in generating even more instructions.
674 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); 674 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
675 } else { 675 } else {
676 ASSERT(destination == kCoreRegisters); 676 ASSERT(destination == kCoreRegisters);
677 // Load the double from heap number to dst1 and dst2 in double format. 677 // Load the double from heap number to dst1 and dst2 in double format.
678 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); 678 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
679 __ lw(dst2, FieldMemOperand(object, 679 __ lw(dst2, FieldMemOperand(object,
680 HeapNumber::kValueOffset + kPointerSize)); 680 HeapNumber::kValueOffset + kPointerSize));
681 } 681 }
682 __ Branch(&done); 682 __ Branch(&done);
683 683
684 // Handle loading a double from a smi. 684 // Handle loading a double from a smi.
685 __ bind(&is_smi); 685 __ bind(&is_smi);
686 if (CpuFeatures::IsSupported(FPU)) { 686 if (CpuFeatures::IsSupported(FPU)) {
687 CpuFeatures::Scope scope(FPU); 687 CpuFeatureScope scope(masm, FPU);
688 // Convert smi to double using FPU instructions. 688 // Convert smi to double using FPU instructions.
689 __ mtc1(scratch1, dst); 689 __ mtc1(scratch1, dst);
690 __ cvt_d_w(dst, dst); 690 __ cvt_d_w(dst, dst);
691 if (destination == kCoreRegisters) { 691 if (destination == kCoreRegisters) {
692 // Load the converted smi to dst1 and dst2 in double format. 692 // Load the converted smi to dst1 and dst2 in double format.
693 __ Move(dst1, dst2, dst); 693 __ Move(dst1, dst2, dst);
694 } 694 }
695 } else { 695 } else {
696 ASSERT(destination == kCoreRegisters); 696 ASSERT(destination == kCoreRegisters);
697 // Write smi to dst1 and dst2 in double format. 697 // Write smi to dst1 and dst2 in double format.
(...skipping 55 matching lines...)
753 Register dst_exponent, 753 Register dst_exponent,
754 Register scratch2, 754 Register scratch2,
755 FPURegister single_scratch) { 755 FPURegister single_scratch) {
756 ASSERT(!int_scratch.is(scratch2)); 756 ASSERT(!int_scratch.is(scratch2));
757 ASSERT(!int_scratch.is(dst_mantissa)); 757 ASSERT(!int_scratch.is(dst_mantissa));
758 ASSERT(!int_scratch.is(dst_exponent)); 758 ASSERT(!int_scratch.is(dst_exponent));
759 759
760 Label done; 760 Label done;
761 761
762 if (CpuFeatures::IsSupported(FPU)) { 762 if (CpuFeatures::IsSupported(FPU)) {
763 CpuFeatures::Scope scope(FPU); 763 CpuFeatureScope scope(masm, FPU);
764 __ mtc1(int_scratch, single_scratch); 764 __ mtc1(int_scratch, single_scratch);
765 __ cvt_d_w(double_dst, single_scratch); 765 __ cvt_d_w(double_dst, single_scratch);
766 if (destination == kCoreRegisters) { 766 if (destination == kCoreRegisters) {
767 __ Move(dst_mantissa, dst_exponent, double_dst); 767 __ Move(dst_mantissa, dst_exponent, double_dst);
768 } 768 }
769 } else { 769 } else {
770 Label fewer_than_20_useful_bits; 770 Label fewer_than_20_useful_bits;
771 // Expected output: 771 // Expected output:
772 // | dst_exponent | dst_mantissa | 772 // | dst_exponent | dst_mantissa |
773 // | s | exp | mantissa | 773 // | s | exp | mantissa |
(...skipping 81 matching lines...)
855 __ Branch(&done); 855 __ Branch(&done);
856 856
857 __ bind(&obj_is_not_smi); 857 __ bind(&obj_is_not_smi);
858 __ AssertRootValue(heap_number_map, 858 __ AssertRootValue(heap_number_map,
859 Heap::kHeapNumberMapRootIndex, 859 Heap::kHeapNumberMapRootIndex,
860 "HeapNumberMap register clobbered."); 860 "HeapNumberMap register clobbered.");
861 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); 861 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
862 862
863 // Load the number. 863 // Load the number.
864 if (CpuFeatures::IsSupported(FPU)) { 864 if (CpuFeatures::IsSupported(FPU)) {
865 CpuFeatures::Scope scope(FPU); 865 CpuFeatureScope scope(masm, FPU);
866 // Load the double value. 866 // Load the double value.
867 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); 867 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
868 868
869 Register except_flag = scratch2; 869 Register except_flag = scratch2;
870 __ EmitFPUTruncate(kRoundToZero, 870 __ EmitFPUTruncate(kRoundToZero,
871 scratch1, 871 scratch1,
872 double_dst, 872 double_dst,
873 at, 873 at,
874 double_scratch, 874 double_scratch,
875 except_flag, 875 except_flag,
(...skipping 76 matching lines...)
952 952
953 __ AssertRootValue(heap_number_map, 953 __ AssertRootValue(heap_number_map,
954 Heap::kHeapNumberMapRootIndex, 954 Heap::kHeapNumberMapRootIndex,
955 "HeapNumberMap register clobbered."); 955 "HeapNumberMap register clobbered.");
956 956
957 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); 957 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
958 958
959 // Object is a heap number. 959 // Object is a heap number.
960 // Convert the floating point value to a 32-bit integer. 960 // Convert the floating point value to a 32-bit integer.
961 if (CpuFeatures::IsSupported(FPU)) { 961 if (CpuFeatures::IsSupported(FPU)) {
962 CpuFeatures::Scope scope(FPU); 962 CpuFeatureScope scope(masm, FPU);
963 // Load the double value. 963 // Load the double value.
964 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); 964 __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
965 965
966 Register except_flag = scratch2; 966 Register except_flag = scratch2;
967 __ EmitFPUTruncate(kRoundToZero, 967 __ EmitFPUTruncate(kRoundToZero,
968 dst, 968 dst,
969 double_scratch0, 969 double_scratch0,
970 scratch1, 970 scratch1,
971 double_scratch1, 971 double_scratch1,
972 except_flag, 972 except_flag,
(...skipping 117 matching lines...)
1090 // a3: Right value (sign, exponent, top of mantissa). 1090 // a3: Right value (sign, exponent, top of mantissa).
1091 1091
1092 // Assert that heap_number_result is saved. 1092 // Assert that heap_number_result is saved.
1093 // We currently always use s0 to pass it. 1093 // We currently always use s0 to pass it.
1094 ASSERT(heap_number_result.is(s0)); 1094 ASSERT(heap_number_result.is(s0));
1095 1095
1096 // Push the current return address before the C call. 1096 // Push the current return address before the C call.
1097 __ push(ra); 1097 __ push(ra);
1098 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. 1098 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
1099 if (!IsMipsSoftFloatABI) { 1099 if (!IsMipsSoftFloatABI) {
1100 CpuFeatures::Scope scope(FPU); 1100 CpuFeatureScope scope(masm, FPU);
1101 // We are not using MIPS FPU instructions, and the parameters for the 1101 // We are not using MIPS FPU instructions, and the parameters for the
1102 // runtime function call are prepared in a0-a3 registers, but the function 1102 // runtime function call are prepared in a0-a3 registers, but the function
1103 // we are calling is compiled with the hard-float flag and expects the 1103 // we are calling is compiled with the hard-float flag and expects the
1104 // hard-float ABI (parameters in f12/f14 registers). We need to copy the 1104 // hard-float ABI (parameters in f12/f14 registers). We need to copy the
1105 // parameters from a0-a3 registers to the f12/f14 register pairs. 1105 // parameters from a0-a3 registers to the f12/f14 register pairs.
1106 __ Move(f12, a0, a1); 1106 __ Move(f12, a0, a1);
1107 __ Move(f14, a2, a3); 1107 __ Move(f14, a2, a3);
1108 } 1108 }
1109 { 1109 {
1110 AllowExternalCallThatCantCauseGC scope(masm); 1110 AllowExternalCallThatCantCauseGC scope(masm);
1111 __ CallCFunction( 1111 __ CallCFunction(
1112 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); 1112 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1113 } 1113 }
1114 // Store answer in the overwritable heap number. 1114 // Store answer in the overwritable heap number.
1115 if (!IsMipsSoftFloatABI) { 1115 if (!IsMipsSoftFloatABI) {
1116 CpuFeatures::Scope scope(FPU); 1116 CpuFeatureScope scope(masm, FPU);
1117 // Double returned in register f0. 1117 // Double returned in register f0.
1118 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); 1118 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1119 } else { 1119 } else {
1120 // Double returned in registers v0 and v1. 1120 // Double returned in registers v0 and v1.
1121 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); 1121 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1122 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); 1122 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1123 } 1123 }
1124 // Place heap_number_result in v0 and return to the pushed return address. 1124 // Place heap_number_result in v0 and return to the pushed return address.
1125 __ pop(ra); 1125 __ pop(ra);
1126 __ Ret(USE_DELAY_SLOT); 1126 __ Ret(USE_DELAY_SLOT);
(...skipping 203 matching lines...)
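Context for the ABI shuffle in the chunk above: under the O32 hard-float convention the two double arguments travel in f12/f14 and the result returns in f0, while under soft-float they travel as 32-bit word pairs in a0-a3 and the result comes back in v0 (mantissa word) and v1 (exponent word) on little-endian MIPS. A hedged C++ sketch of the word split the soft-float store path performs (illustrative only; assumes little-endian doubles, which is what kMantissaOffset/kExponentOffset encode):

#include <cstdint>
#include <cstring>

// Split a double into the two 32-bit words the soft-float path stores into
// the HeapNumber: the low word (mantissa, like v0) and the high word (sign,
// exponent and top of mantissa, like v1).
void SplitDouble(double value, uint32_t* mantissa_word,
                 uint32_t* exponent_word) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);       // bit-exact view of the double
  *mantissa_word = static_cast<uint32_t>(bits);  // low 32 bits
  *exponent_word = static_cast<uint32_t>(bits >> 32);  // high 32 bits
}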
1330 __ mov(v0, lhs); 1330 __ mov(v0, lhs);
1331 } else { 1331 } else {
1332 // Smi compared non-strictly with a non-Smi non-heap-number. Call 1332 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1333 // the runtime. 1333 // the runtime.
1334 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); 1334 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1335 } 1335 }
1336 1336
1337 // Rhs is a smi, lhs is a number. 1337 // Rhs is a smi, lhs is a number.
1338 // Convert smi rhs to double. 1338 // Convert smi rhs to double.
1339 if (CpuFeatures::IsSupported(FPU)) { 1339 if (CpuFeatures::IsSupported(FPU)) {
1340 CpuFeatures::Scope scope(FPU); 1340 CpuFeatureScope scope(masm, FPU);
1341 __ sra(at, rhs, kSmiTagSize); 1341 __ sra(at, rhs, kSmiTagSize);
1342 __ mtc1(at, f14); 1342 __ mtc1(at, f14);
1343 __ cvt_d_w(f14, f14); 1343 __ cvt_d_w(f14, f14);
1344 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1344 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1345 } else { 1345 } else {
1346 // Load lhs to a double in a2, a3. 1346 // Load lhs to a double in a2, a3.
1347 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); 1347 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1348 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1348 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1349 1349
1350 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. 1350 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
(...skipping 18 matching lines...)
1369 __ li(v0, Operand(1)); 1369 __ li(v0, Operand(1));
1370 } else { 1370 } else {
1371 // Smi compared non-strictly with a non-Smi non-heap-number. Call 1371 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1372 // the runtime. 1372 // the runtime.
1373 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); 1373 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1374 } 1374 }
1375 1375
1376 // Lhs is a smi, rhs is a number. 1376 // Lhs is a smi, rhs is a number.
1377 // Convert smi lhs to double. 1377 // Convert smi lhs to double.
1378 if (CpuFeatures::IsSupported(FPU)) { 1378 if (CpuFeatures::IsSupported(FPU)) {
1379 CpuFeatures::Scope scope(FPU); 1379 CpuFeatureScope scope(masm, FPU);
1380 __ sra(at, lhs, kSmiTagSize); 1380 __ sra(at, lhs, kSmiTagSize);
1381 __ mtc1(at, f12); 1381 __ mtc1(at, f12);
1382 __ cvt_d_w(f12, f12); 1382 __ cvt_d_w(f12, f12);
1383 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1383 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1384 } else { 1384 } else {
1385 // Convert lhs to a double format. t5 is scratch. 1385 // Convert lhs to a double format. t5 is scratch.
1386 __ mov(t6, lhs); 1386 __ mov(t6, lhs);
1387 ConvertToDoubleStub stub2(a3, a2, t6, t5); 1387 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1388 __ push(ra); 1388 __ push(ra);
1389 __ Call(stub2.GetCode(masm->isolate())); 1389 __ Call(stub2.GetCode(masm->isolate()));
1390 __ pop(ra); 1390 __ pop(ra);
1391 // Load rhs to a double in a1, a0. 1391 // Load rhs to a double in a1, a0.
1392 if (rhs.is(a0)) { 1392 if (rhs.is(a0)) {
1393 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); 1393 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1394 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1394 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1395 } else { 1395 } else {
1396 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1396 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1397 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); 1397 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1398 } 1398 }
1399 } 1399 }
1400 // Fall through to both_loaded_as_doubles. 1400 // Fall through to both_loaded_as_doubles.
1401 } 1401 }
1402 1402
1403 1403
1404 void EmitNanCheck(MacroAssembler* masm, Condition cc) { 1404 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1405 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); 1405 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1406 if (CpuFeatures::IsSupported(FPU)) { 1406 if (CpuFeatures::IsSupported(FPU)) {
1407 CpuFeatures::Scope scope(FPU); 1407 CpuFeatureScope scope(masm, FPU);
1408 // Lhs and rhs are already loaded to f12 and f14 register pairs. 1408 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1409 __ Move(t0, t1, f14); 1409 __ Move(t0, t1, f14);
1410 __ Move(t2, t3, f12); 1410 __ Move(t2, t3, f12);
1411 } else { 1411 } else {
1412 // Lhs and rhs are already loaded to GP registers. 1412 // Lhs and rhs are already loaded to GP registers.
1413 __ mov(t0, a0); // a0 has LS 32 bits of rhs. 1413 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1414 __ mov(t1, a1); // a1 has MS 32 bits of rhs. 1414 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1415 __ mov(t2, a2); // a2 has LS 32 bits of lhs. 1415 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1416 __ mov(t3, a3); // a3 has MS 32 bits of lhs. 1416 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1417 } 1417 }
(...skipping 46 matching lines...)
1464 // Call C routine that may not cause GC or other trouble. 1464 // Call C routine that may not cause GC or other trouble.
1465 // We use a manual call and return because we need the argument slots to 1465 // We use a manual call and return because we need the argument slots to
1466 // be freed. 1466 // be freed.
1467 1467
1468 Label return_result_not_equal, return_result_equal; 1468 Label return_result_not_equal, return_result_equal;
1469 if (cc == eq) { 1469 if (cc == eq) {
1470 // Doubles are not equal unless they have the same bit pattern. 1470 // Doubles are not equal unless they have the same bit pattern.
1471 // Exception: 0 and -0. 1471 // Exception: 0 and -0.
1472 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); 1472 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1473 if (CpuFeatures::IsSupported(FPU)) { 1473 if (CpuFeatures::IsSupported(FPU)) {
1474 CpuFeatures::Scope scope(FPU); 1474 CpuFeatureScope scope(masm, FPU);
1475 // Lhs and rhs are already loaded to f12 and f14 register pairs. 1475 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1476 __ Move(t0, t1, f14); 1476 __ Move(t0, t1, f14);
1477 __ Move(t2, t3, f12); 1477 __ Move(t2, t3, f12);
1478 } else { 1478 } else {
1479 // Lhs and rhs are already loaded to GP registers. 1479 // Lhs and rhs are already loaded to GP registers.
1480 __ mov(t0, a0); // a0 has LS 32 bits of rhs. 1480 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1481 __ mov(t1, a1); // a1 has MS 32 bits of rhs. 1481 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1482 __ mov(t2, a2); // a2 has LS 32 bits of lhs. 1482 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1483 __ mov(t3, a3); // a3 has MS 32 bits of lhs. 1483 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1484 } 1484 }
(...skipping 35 matching lines...)
1520 __ Move(f12, a0, a1); 1520 __ Move(f12, a0, a1);
1521 __ Move(f14, a2, a3); 1521 __ Move(f14, a2, a3);
1522 } 1522 }
1523 1523
1524 AllowExternalCallThatCantCauseGC scope(masm); 1524 AllowExternalCallThatCantCauseGC scope(masm);
1525 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 1525 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1526 0, 2); 1526 0, 2);
1527 __ pop(ra); // Because this function returns int, result is in v0. 1527 __ pop(ra); // Because this function returns int, result is in v0.
1528 __ Ret(); 1528 __ Ret();
1529 } else { 1529 } else {
1530 CpuFeatures::Scope scope(FPU); 1530 CpuFeatureScope scope(masm, FPU);
1531 Label equal, less_than; 1531 Label equal, less_than;
1532 __ BranchF(&equal, NULL, eq, f12, f14); 1532 __ BranchF(&equal, NULL, eq, f12, f14);
1533 __ BranchF(&less_than, NULL, lt, f12, f14); 1533 __ BranchF(&less_than, NULL, lt, f12, f14);
1534 1534
1535 // Not equal, not less, not NaN, must be greater. 1535 // Not equal, not less, not NaN, must be greater.
1536 1536
1537 __ li(v0, Operand(GREATER)); 1537 __ li(v0, Operand(GREATER));
1538 __ Ret(); 1538 __ Ret();
1539 1539
1540 __ bind(&equal); 1540 __ bind(&equal);
(...skipping 55 matching lines...)
1596 Label* slow) { 1596 Label* slow) {
1597 __ GetObjectType(lhs, a3, a2); 1597 __ GetObjectType(lhs, a3, a2);
1598 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); 1598 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1599 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); 1599 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1600 // If first was a heap number & second wasn't, go to slow case. 1600 // If first was a heap number & second wasn't, go to slow case.
1601 __ Branch(slow, ne, a3, Operand(a2)); 1601 __ Branch(slow, ne, a3, Operand(a2));
1602 1602
1603 // Both are heap numbers. Load them up then jump to the code we have 1603 // Both are heap numbers. Load them up then jump to the code we have
1604 // for that. 1604 // for that.
1605 if (CpuFeatures::IsSupported(FPU)) { 1605 if (CpuFeatures::IsSupported(FPU)) {
1606 CpuFeatures::Scope scope(FPU); 1606 CpuFeatureScope scope(masm, FPU);
1607 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1607 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1608 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1608 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1609 } else { 1609 } else {
1610 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); 1610 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1611 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); 1611 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1612 if (rhs.is(a0)) { 1612 if (rhs.is(a0)) {
1613 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); 1613 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1614 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1614 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1615 } else { 1615 } else {
1616 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); 1616 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
(...skipping 74 matching lines...)
1691 // Calculate the entry in the number string cache. The hash value in the 1691 // Calculate the entry in the number string cache. The hash value in the
1692 // number string cache for smis is just the smi value, and the hash for 1692 // number string cache for smis is just the smi value, and the hash for
1693 // doubles is the xor of the upper and lower words. See 1693 // doubles is the xor of the upper and lower words. See
1694 // Heap::GetNumberStringCache. 1694 // Heap::GetNumberStringCache.
1695 Isolate* isolate = masm->isolate(); 1695 Isolate* isolate = masm->isolate();
1696 Label is_smi; 1696 Label is_smi;
1697 Label load_result_from_cache; 1697 Label load_result_from_cache;
1698 if (!object_is_smi) { 1698 if (!object_is_smi) {
1699 __ JumpIfSmi(object, &is_smi); 1699 __ JumpIfSmi(object, &is_smi);
1700 if (CpuFeatures::IsSupported(FPU)) { 1700 if (CpuFeatures::IsSupported(FPU)) {
1701 CpuFeatures::Scope scope(FPU); 1701 CpuFeatureScope scope(masm, FPU);
1702 __ CheckMap(object, 1702 __ CheckMap(object,
1703 scratch1, 1703 scratch1,
1704 Heap::kHeapNumberMapRootIndex, 1704 Heap::kHeapNumberMapRootIndex,
1705 not_found, 1705 not_found,
1706 DONT_DO_SMI_CHECK); 1706 DONT_DO_SMI_CHECK);
1707 1707
1708 STATIC_ASSERT(8 == kDoubleSize); 1708 STATIC_ASSERT(8 == kDoubleSize);
1709 __ Addu(scratch1, 1709 __ Addu(scratch1,
1710 object, 1710 object,
1711 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); 1711 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
(...skipping 132 matching lines...)
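The chunk above keys the number-to-string cache as its comment describes: a smi hashes to its own value, a heap-number double to the xor of its two 32-bit words (see Heap::GetNumberStringCache). A hedged sketch of that computation; the masking to a cache index is an assumption here, since the real code derives the mask from the cache length:

#include <cstdint>
#include <cstring>

uint32_t DoubleHash(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t low = static_cast<uint32_t>(bits);         // lower word
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // upper word
  return low ^ high;  // xor of the two halves, as the comment describes
}

uint32_t CacheIndex(uint32_t hash, uint32_t cache_mask) {
  return hash & cache_mask;  // assumption: mask = (number of entries - 1)
}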
1844 EmitSmiNonsmiComparison(masm, lhs, rhs, 1844 EmitSmiNonsmiComparison(masm, lhs, rhs,
1845 &both_loaded_as_doubles, &slow, strict()); 1845 &both_loaded_as_doubles, &slow, strict());
1846 1846
1847 __ bind(&both_loaded_as_doubles); 1847 __ bind(&both_loaded_as_doubles);
1848 // f12, f14 are the double representations of the left hand side 1848 // f12, f14 are the double representations of the left hand side
1849 // and the right hand side if we have FPU. Otherwise a2, a3 represent 1849 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1850 // left hand side and a0, a1 represent right hand side. 1850 // left hand side and a0, a1 represent right hand side.
1851 1851
1852 Isolate* isolate = masm->isolate(); 1852 Isolate* isolate = masm->isolate();
1853 if (CpuFeatures::IsSupported(FPU)) { 1853 if (CpuFeatures::IsSupported(FPU)) {
1854 CpuFeatures::Scope scope(FPU); 1854 CpuFeatureScope scope(masm, FPU);
1855 Label nan; 1855 Label nan;
1856 __ li(t0, Operand(LESS)); 1856 __ li(t0, Operand(LESS));
1857 __ li(t1, Operand(GREATER)); 1857 __ li(t1, Operand(GREATER));
1858 __ li(t2, Operand(EQUAL)); 1858 __ li(t2, Operand(EQUAL));
1859 1859
1860 // Check if either rhs or lhs is NaN. 1860 // Check if either rhs or lhs is NaN.
1861 __ BranchF(NULL, &nan, eq, f12, f14); 1861 __ BranchF(NULL, &nan, eq, f12, f14);
1862 1862
1863 // Check if the LESS condition is satisfied. If true, conditionally move 1863 // Check if the LESS condition is satisfied. If true, conditionally move
1864 // the result to v0. 1864 // the result to v0.
(...skipping 114 matching lines...)
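The FPU compare path above materializes the result without a general branch: it loads LESS, GREATER and EQUAL into t0-t2, jumps to the nan label if either operand is unordered, and conditionally moves the right constant into v0. A hedged C++ statement of the same decision table (LESS = -1, EQUAL = 0, GREATER = 1 mirror V8's compare constants; the NaN outcome depends on the condition being compiled, so it is a parameter here):

#include <cmath>

enum { LESS = -1, EQUAL = 0, GREATER = 1 };

int CompareDoubles(double lhs, double rhs, int nan_result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return nan_result;  // the &nan label
  if (lhs < rhs) return LESS;     // conditional move of t0
  if (lhs > rhs) return GREATER;  // conditional move of t1
  return EQUAL;                   // conditional move of t2
}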
1979 1979
1980 __ bind(&miss); 1980 __ bind(&miss);
1981 GenerateMiss(masm); 1981 GenerateMiss(masm);
1982 } 1982 }
1983 1983
1984 1984
1985 // The stub expects its argument in the tos_ register and returns its result in 1985 // The stub expects its argument in the tos_ register and returns its result in
1986 // it, too: zero for false, and a non-zero value for true. 1986 // it, too: zero for false, and a non-zero value for true.
1987 void ToBooleanStub::Generate(MacroAssembler* masm) { 1987 void ToBooleanStub::Generate(MacroAssembler* masm) {
1988 // This stub uses FPU instructions. 1988 // This stub uses FPU instructions.
1989 CpuFeatures::Scope scope(FPU); 1989 CpuFeatureScope scope(masm, FPU);
1990 1990
1991 Label patch; 1991 Label patch;
1992 const Register map = t5.is(tos_) ? t3 : t5; 1992 const Register map = t5.is(tos_) ? t3 : t5;
1993 1993
1994 // undefined -> false. 1994 // undefined -> false.
1995 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); 1995 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1996 1996
1997 // Boolean -> its value. 1997 // Boolean -> its value.
1998 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); 1998 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1999 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); 1999 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
(...skipping 94 matching lines...)
2094 1); 2094 1);
2095 } 2095 }
2096 2096
2097 2097
2098 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { 2098 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
2099 // We don't allow a GC during a store buffer overflow so there is no need to 2099 // We don't allow a GC during a store buffer overflow so there is no need to
2100 // store the registers in any particular way, but we do have to store and 2100 // store the registers in any particular way, but we do have to store and
2101 // restore them. 2101 // restore them.
2102 __ MultiPush(kJSCallerSaved | ra.bit()); 2102 __ MultiPush(kJSCallerSaved | ra.bit());
2103 if (save_doubles_ == kSaveFPRegs) { 2103 if (save_doubles_ == kSaveFPRegs) {
2104 CpuFeatures::Scope scope(FPU); 2104 CpuFeatureScope scope(masm, FPU);
2105 __ MultiPushFPU(kCallerSavedFPU); 2105 __ MultiPushFPU(kCallerSavedFPU);
2106 } 2106 }
2107 const int argument_count = 1; 2107 const int argument_count = 1;
2108 const int fp_argument_count = 0; 2108 const int fp_argument_count = 0;
2109 const Register scratch = a1; 2109 const Register scratch = a1;
2110 2110
2111 AllowExternalCallThatCantCauseGC scope(masm); 2111 AllowExternalCallThatCantCauseGC scope(masm);
2112 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); 2112 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2113 __ li(a0, Operand(ExternalReference::isolate_address())); 2113 __ li(a0, Operand(ExternalReference::isolate_address()));
2114 __ CallCFunction( 2114 __ CallCFunction(
2115 ExternalReference::store_buffer_overflow_function(masm->isolate()), 2115 ExternalReference::store_buffer_overflow_function(masm->isolate()),
2116 argument_count); 2116 argument_count);
2117 if (save_doubles_ == kSaveFPRegs) { 2117 if (save_doubles_ == kSaveFPRegs) {
2118 CpuFeatures::Scope scope(FPU); 2118 CpuFeatureScope scope(masm, FPU);
2119 __ MultiPopFPU(kCallerSavedFPU); 2119 __ MultiPopFPU(kCallerSavedFPU);
2120 } 2120 }
2121 2121
2122 __ MultiPop(kJSCallerSaved | ra.bit()); 2122 __ MultiPop(kJSCallerSaved | ra.bit());
2123 __ Ret(); 2123 __ Ret();
2124 } 2124 }
2125 2125
2126 2126
2127 void UnaryOpStub::PrintName(StringStream* stream) { 2127 void UnaryOpStub::PrintName(StringStream* stream) {
2128 const char* op_name = Token::Name(op_); 2128 const char* op_name = Token::Name(op_);
(...skipping 212 matching lines...)
2341 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); 2341 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2342 // Negate the result. 2342 // Negate the result.
2343 __ Xor(a1, a1, -1); 2343 __ Xor(a1, a1, -1);
2344 2344
2345 __ bind(&heapnumber_allocated); 2345 __ bind(&heapnumber_allocated);
2346 __ mov(v0, a2); // Move newly allocated heap number to v0. 2346 __ mov(v0, a2); // Move newly allocated heap number to v0.
2347 } 2347 }
2348 2348
2349 if (CpuFeatures::IsSupported(FPU)) { 2349 if (CpuFeatures::IsSupported(FPU)) {
2350 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. 2350 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2351 CpuFeatures::Scope scope(FPU); 2351 CpuFeatureScope scope(masm, FPU);
2352 __ mtc1(a1, f0); 2352 __ mtc1(a1, f0);
2353 __ cvt_d_w(f0, f0); 2353 __ cvt_d_w(f0, f0);
2354 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); 2354 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2355 __ Ret(); 2355 __ Ret();
2356 } else { 2356 } else {
2357 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not 2357 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2358 // have to set up a frame. 2358 // have to set up a frame.
2359 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); 2359 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2360 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); 2360 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
2361 } 2361 }
(...skipping 324 matching lines...)
2686 masm, destination, left, f12, a0, a1, heap_number_map, 2686 masm, destination, left, f12, a0, a1, heap_number_map,
2687 scratch1, scratch2, fail); 2687 scratch1, scratch2, fail);
2688 } 2688 }
2689 } 2689 }
2690 2690
2691 // Calculate the result. 2691 // Calculate the result.
2692 if (destination == FloatingPointHelper::kFPURegisters) { 2692 if (destination == FloatingPointHelper::kFPURegisters) {
2693 // Using FPU registers: 2693 // Using FPU registers:
2694 // f12: Left value. 2694 // f12: Left value.
2695 // f14: Right value. 2695 // f14: Right value.
2696 CpuFeatures::Scope scope(FPU); 2696 CpuFeatureScope scope(masm, FPU);
2697 switch (op) { 2697 switch (op) {
2698 case Token::ADD: 2698 case Token::ADD:
2699 __ add_d(f10, f12, f14); 2699 __ add_d(f10, f12, f14);
2700 break; 2700 break;
2701 case Token::SUB: 2701 case Token::SUB:
2702 __ sub_d(f10, f12, f14); 2702 __ sub_d(f10, f12, f14);
2703 break; 2703 break;
2704 case Token::MUL: 2704 case Token::MUL:
2705 __ mul_d(f10, f12, f14); 2705 __ mul_d(f10, f12, f14);
2706 break; 2706 break;
(...skipping 111 matching lines...)
2818 // a2: Answer as signed int32. 2818 // a2: Answer as signed int32.
2819 // t1: Heap number to write answer into. 2819 // t1: Heap number to write answer into.
2820 2820
2821 // Nothing can go wrong now, so move the heap number to v0, which is the 2821 // Nothing can go wrong now, so move the heap number to v0, which is the
2822 // result. 2822 // result.
2823 __ mov(v0, t1); 2823 __ mov(v0, t1);
2824 2824
2825 if (CpuFeatures::IsSupported(FPU)) { 2825 if (CpuFeatures::IsSupported(FPU)) {
2826 // Convert the int32 in a2 to the heap number in a0. As 2826 // Convert the int32 in a2 to the heap number in a0. As
2827 // mentioned above SHR needs to always produce a positive result. 2827 // mentioned above SHR needs to always produce a positive result.
2828 CpuFeatures::Scope scope(FPU); 2828 CpuFeatureScope scope(masm, FPU);
2829 __ mtc1(a2, f0); 2829 __ mtc1(a2, f0);
2830 if (op == Token::SHR) { 2830 if (op == Token::SHR) {
2831 __ Cvt_d_uw(f0, f0, f22); 2831 __ Cvt_d_uw(f0, f0, f22);
2832 } else { 2832 } else {
2833 __ cvt_d_w(f0, f0); 2833 __ cvt_d_w(f0, f0);
2834 } 2834 }
2835 // ARM uses a workaround here because of the unaligned HeapNumber 2835 // ARM uses a workaround here because of the unaligned HeapNumber
2836 // kValueOffset. On MIPS this workaround is built into sdc1 so 2836 // kValueOffset. On MIPS this workaround is built into sdc1 so
2837 // there's no point in generating even more instructions. 2837 // there's no point in generating even more instructions.
2838 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); 2838 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
(...skipping 174 matching lines...)
3013 f16, 3013 f16,
3014 t0, 3014 t0,
3015 t1, 3015 t1,
3016 heap_number_map, 3016 heap_number_map,
3017 scratch1, 3017 scratch1,
3018 scratch2, 3018 scratch2,
3019 f2, 3019 f2,
3020 &transition); 3020 &transition);
3021 3021
3022 if (destination == FloatingPointHelper::kFPURegisters) { 3022 if (destination == FloatingPointHelper::kFPURegisters) {
3023 CpuFeatures::Scope scope(FPU); 3023 CpuFeatureScope scope(masm, FPU);
3024 Label return_heap_number; 3024 Label return_heap_number;
3025 switch (op_) { 3025 switch (op_) {
3026 case Token::ADD: 3026 case Token::ADD:
3027 __ add_d(f10, f12, f14); 3027 __ add_d(f10, f12, f14);
3028 break; 3028 break;
3029 case Token::SUB: 3029 case Token::SUB:
3030 __ sub_d(f10, f12, f14); 3030 __ sub_d(f10, f12, f14);
3031 break; 3031 break;
3032 case Token::MUL: 3032 case Token::MUL:
3033 __ mul_d(f10, f12, f14); 3033 __ mul_d(f10, f12, f14);
(...skipping 193 matching lines...)
3227 heap_number_result = t1; 3227 heap_number_result = t1;
3228 BinaryOpStub_GenerateHeapResultAllocation(masm, 3228 BinaryOpStub_GenerateHeapResultAllocation(masm,
3229 heap_number_result, 3229 heap_number_result,
3230 heap_number_map, 3230 heap_number_map,
3231 scratch1, 3231 scratch1,
3232 scratch2, 3232 scratch2,
3233 &call_runtime, 3233 &call_runtime,
3234 mode_); 3234 mode_);
3235 3235
3236 if (CpuFeatures::IsSupported(FPU)) { 3236 if (CpuFeatures::IsSupported(FPU)) {
3237 CpuFeatures::Scope scope(FPU); 3237 CpuFeatureScope scope(masm, FPU);
3238 3238
3239 if (op_ != Token::SHR) { 3239 if (op_ != Token::SHR) {
3240 // Convert the result to a floating point value. 3240 // Convert the result to a floating point value.
3241 __ mtc1(a2, double_scratch); 3241 __ mtc1(a2, double_scratch);
3242 __ cvt_d_w(double_scratch, double_scratch); 3242 __ cvt_d_w(double_scratch, double_scratch);
3243 } else { 3243 } else {
3244 // The result must be interpreted as an unsigned 32-bit integer. 3244 // The result must be interpreted as an unsigned 32-bit integer.
3245 __ mtc1(a2, double_scratch); 3245 __ mtc1(a2, double_scratch);
3246 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); 3246 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3247 } 3247 }
(...skipping 183 matching lines...)
3431 Label input_not_smi; 3431 Label input_not_smi;
3432 Label loaded; 3432 Label loaded;
3433 Label calculate; 3433 Label calculate;
3434 Label invalid_cache; 3434 Label invalid_cache;
3435 const Register scratch0 = t5; 3435 const Register scratch0 = t5;
3436 const Register scratch1 = t3; 3436 const Register scratch1 = t3;
3437 const Register cache_entry = a0; 3437 const Register cache_entry = a0;
3438 const bool tagged = (argument_type_ == TAGGED); 3438 const bool tagged = (argument_type_ == TAGGED);
3439 3439
3440 if (CpuFeatures::IsSupported(FPU)) { 3440 if (CpuFeatures::IsSupported(FPU)) {
3441 CpuFeatures::Scope scope(FPU); 3441 CpuFeatureScope scope(masm, FPU);
3442 3442
3443 if (tagged) { 3443 if (tagged) {
3444 // Argument is a number and is on stack and in a0. 3444 // Argument is a number and is on stack and in a0.
3445 // Load argument and check if it is a smi. 3445 // Load argument and check if it is a smi.
3446 __ JumpIfNotSmi(a0, &input_not_smi); 3446 __ JumpIfNotSmi(a0, &input_not_smi);
3447 3447
3448 // Input is a smi. Convert to double and load the low and high words 3448 // Input is a smi. Convert to double and load the low and high words
3449 // of the double into a2, a3. 3449 // of the double into a2, a3.
3450 __ sra(t0, a0, kSmiTagSize); 3450 __ sra(t0, a0, kSmiTagSize);
3451 __ mtc1(t0, f4); 3451 __ mtc1(t0, f4);
(...skipping 89 matching lines...)
3541 __ IncrementCounter( 3541 __ IncrementCounter(
3542 counters->transcendental_cache_miss(), 1, scratch0, scratch1); 3542 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3543 if (tagged) { 3543 if (tagged) {
3544 __ bind(&invalid_cache); 3544 __ bind(&invalid_cache);
3545 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), 3545 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3546 masm->isolate()), 3546 masm->isolate()),
3547 1, 3547 1,
3548 1); 3548 1);
3549 } else { 3549 } else {
3550 ASSERT(CpuFeatures::IsSupported(FPU)); 3550 ASSERT(CpuFeatures::IsSupported(FPU));
3551 CpuFeatures::Scope scope(FPU); 3551 CpuFeatureScope scope(masm, FPU);
3552 3552
3553 Label no_update; 3553 Label no_update;
3554 Label skip_cache; 3554 Label skip_cache;
3555 3555
3556 // Call C function to calculate the result and update the cache. 3556 // Call C function to calculate the result and update the cache.
3557 // a0: precalculated cache entry address. 3557 // a0: precalculated cache entry address.
3558 // a2 and a3: parts of the double value. 3558 // a2 and a3: parts of the double value.
3559 // Store a0, a2 and a3 on stack for later before calling C function. 3559 // Store a0, a2 and a3 on stack for later before calling C function.
3560 __ Push(a3, a2, cache_entry); 3560 __ Push(a3, a2, cache_entry);
3561 GenerateCallCFunction(masm, scratch0); 3561 GenerateCallCFunction(masm, scratch0);
(...skipping 107 matching lines...)
3669 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); 3669 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3670 } 3670 }
3671 3671
3672 3672
3673 void InterruptStub::Generate(MacroAssembler* masm) { 3673 void InterruptStub::Generate(MacroAssembler* masm) {
3674 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); 3674 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3675 } 3675 }
3676 3676
3677 3677
3678 void MathPowStub::Generate(MacroAssembler* masm) { 3678 void MathPowStub::Generate(MacroAssembler* masm) {
3679 CpuFeatures::Scope fpu_scope(FPU); 3679 CpuFeatureScope fpu_scope(masm, FPU);
3680 const Register base = a1; 3680 const Register base = a1;
3681 const Register exponent = a2; 3681 const Register exponent = a2;
3682 const Register heapnumbermap = t1; 3682 const Register heapnumbermap = t1;
3683 const Register heapnumber = v0; 3683 const Register heapnumber = v0;
3684 const DoubleRegister double_base = f2; 3684 const DoubleRegister double_base = f2;
3685 const DoubleRegister double_exponent = f4; 3685 const DoubleRegister double_exponent = f4;
3686 const DoubleRegister double_result = f0; 3686 const DoubleRegister double_result = f0;
3687 const DoubleRegister double_scratch = f6; 3687 const DoubleRegister double_scratch = f6;
3688 const FPURegister single_scratch = f8; 3688 const FPURegister single_scratch = f8;
3689 const Register scratch = t5; 3689 const Register scratch = t5;
(...skipping 222 matching lines...)
3912 3912
3913 void CodeStub::GenerateFPStubs(Isolate* isolate) { 3913 void CodeStub::GenerateFPStubs(Isolate* isolate) {
3914 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) 3914 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
3915 ? kSaveFPRegs 3915 ? kSaveFPRegs
3916 : kDontSaveFPRegs; 3916 : kDontSaveFPRegs;
3917 CEntryStub save_doubles(1, mode); 3917 CEntryStub save_doubles(1, mode);
3918 StoreBufferOverflowStub stub(mode); 3918 StoreBufferOverflowStub stub(mode);
3919 // These stubs might already be in the snapshot, detect that and don't 3919 // These stubs might already be in the snapshot, detect that and don't
3920 // regenerate, which would lead to code stub initialization state being messed 3920 // regenerate, which would lead to code stub initialization state being messed
3921 // up. 3921 // up.
3922 Code* save_doubles_code = NULL; 3922 Code* save_doubles_code;
3923 Code* store_buffer_overflow_code = NULL; 3923 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
3924 if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) { 3924 save_doubles_code = *save_doubles.GetCode(isolate);
3925 if (CpuFeatures::IsSupported(FPU)) {
3926 CpuFeatures::Scope scope2(FPU);
3927 save_doubles_code = *save_doubles.GetCode(isolate);
3928 store_buffer_overflow_code = *stub.GetCode(isolate);
3929 } else {
3930 save_doubles_code = *save_doubles.GetCode(isolate);
3931 store_buffer_overflow_code = *stub.GetCode(isolate);
3932 }
3933 save_doubles_code->set_is_pregenerated(true); 3925 save_doubles_code->set_is_pregenerated(true);
3926
3927 Code* store_buffer_overflow_code = *stub.GetCode(isolate);
3934 store_buffer_overflow_code->set_is_pregenerated(true); 3928 store_buffer_overflow_code->set_is_pregenerated(true);
3935 } 3929 }
3936 ISOLATE->set_fp_stubs_generated(true); 3930 isolate->set_fp_stubs_generated(true);
3937 } 3931 }
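The right-hand side of the GenerateFPStubs chunk above is also a cleanup: the old code wrapped stub generation in an IsSupported(FPU) branch whose two arms were identical, and consulted the ISOLATE macro instead of the isolate argument. What remains is a plain find-or-generate pattern, sketched here with stand-in types (the real Code and stub APIs live in V8's code-stubs.h; these signatures are simplified for illustration, not the actual V8 API):

struct Code {
  void set_is_pregenerated(bool) {}
};

struct FakeStub {  // stand-in; real stubs cache compiled code per isolate
  Code code_;
  bool in_cache_ = false;
  bool FindCodeInCache(Code** out) {
    if (in_cache_) *out = &code_;
    return in_cache_;
  }
  Code* GetCode() {  // generates and caches on first use
    in_cache_ = true;
    return &code_;
  }
};

// Only generate when the snapshot did not already provide the stub, then
// mark it pregenerated so later initialization leaves it alone.
void EnsurePregenerated(FakeStub* stub) {
  Code* code;
  if (!stub->FindCodeInCache(&code)) {
    code = stub->GetCode();
    code->set_is_pregenerated(true);
  }
}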
3938 3932
3939 3933
3940 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { 3934 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
3941 CEntryStub stub(1, kDontSaveFPRegs); 3935 CEntryStub stub(1, kDontSaveFPRegs);
3942 Handle<Code> code = stub.GetCode(isolate); 3936 Handle<Code> code = stub.GetCode(isolate);
3943 code->set_is_pregenerated(true); 3937 code->set_is_pregenerated(true);
3944 } 3938 }
3945 3939
3946 3940
(...skipping 235 matching lines...)
4182 // a3: argc 4176 // a3: argc
4183 // 4177 //
4184 // Stack: 4178 // Stack:
4185 // 4 args slots 4179 // 4 args slots
4186 // args 4180 // args
4187 4181
4188 // Save callee saved registers on the stack. 4182 // Save callee saved registers on the stack.
4189 __ MultiPush(kCalleeSaved | ra.bit()); 4183 __ MultiPush(kCalleeSaved | ra.bit());
4190 4184
4191 if (CpuFeatures::IsSupported(FPU)) { 4185 if (CpuFeatures::IsSupported(FPU)) {
4192 CpuFeatures::Scope scope(FPU); 4186 CpuFeatureScope scope(masm, FPU);
4193 // Save callee-saved FPU registers. 4187 // Save callee-saved FPU registers.
4194 __ MultiPushFPU(kCalleeSavedFPU); 4188 __ MultiPushFPU(kCalleeSavedFPU);
4195 // Set up the reserved register for 0.0. 4189 // Set up the reserved register for 0.0.
4196 __ Move(kDoubleRegZero, 0.0); 4190 __ Move(kDoubleRegZero, 0.0);
4197 } 4191 }
4198 4192
4199 4193
4200 // Load argv in s0 register. 4194 // Load argv in s0 register.
4201 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; 4195 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4202 if (CpuFeatures::IsSupported(FPU)) { 4196 if (CpuFeatures::IsSupported(FPU)) {
(...skipping 128 matching lines...)
4331 // Restore the top frame descriptors from the stack. 4325 // Restore the top frame descriptors from the stack.
4332 __ pop(t1); 4326 __ pop(t1);
4333 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, 4327 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4334 isolate))); 4328 isolate)));
4335 __ sw(t1, MemOperand(t0)); 4329 __ sw(t1, MemOperand(t0));
4336 4330
4337 // Reset the stack to the callee saved registers. 4331 // Reset the stack to the callee saved registers.
4338 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); 4332 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4339 4333
4340 if (CpuFeatures::IsSupported(FPU)) { 4334 if (CpuFeatures::IsSupported(FPU)) {
4341 CpuFeatures::Scope scope(FPU); 4335 CpuFeatureScope scope(masm, FPU);
4342 // Restore callee-saved fpu registers. 4336 // Restore callee-saved fpu registers.
4343 __ MultiPopFPU(kCalleeSavedFPU); 4337 __ MultiPopFPU(kCalleeSavedFPU);
4344 } 4338 }
4345 4339
4346 // Restore callee saved registers from the stack. 4340 // Restore callee saved registers from the stack.
4347 __ MultiPop(kCalleeSaved | ra.bit()); 4341 __ MultiPop(kCalleeSaved | ra.bit());
4348 // Return. 4342 // Return.
4349 __ Jump(ra); 4343 __ Jump(ra);
4350 } 4344 }
4351 4345
(...skipping 2668 matching lines...)
7020 if (left_ == CompareIC::SMI) { 7014 if (left_ == CompareIC::SMI) {
7021 __ JumpIfNotSmi(a1, &miss); 7015 __ JumpIfNotSmi(a1, &miss);
7022 } 7016 }
7023 if (right_ == CompareIC::SMI) { 7017 if (right_ == CompareIC::SMI) {
7024 __ JumpIfNotSmi(a0, &miss); 7018 __ JumpIfNotSmi(a0, &miss);
7025 } 7019 }
7026 7020
7027 // Inlining the double comparison and falling back to the general compare 7021 // Inlining the double comparison and falling back to the general compare
7028 // stub if NaN is involved or FPU is unsupported. 7022 // stub if NaN is involved or FPU is unsupported.
7029 if (CpuFeatures::IsSupported(FPU)) { 7023 if (CpuFeatures::IsSupported(FPU)) {
7030 CpuFeatures::Scope scope(FPU); 7024 CpuFeatureScope scope(masm, FPU);
7031 7025
7032 // Load left and right operand. 7026 // Load left and right operand.
7033 Label done, left, left_smi, right_smi; 7027 Label done, left, left_smi, right_smi;
7034 __ JumpIfSmi(a0, &right_smi); 7028 __ JumpIfSmi(a0, &right_smi);
7035 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, 7029 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
7036 DONT_DO_SMI_CHECK); 7030 DONT_DO_SMI_CHECK);
7037 __ Subu(a2, a0, Operand(kHeapObjectTag)); 7031 __ Subu(a2, a0, Operand(kHeapObjectTag));
7038 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); 7032 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
7039 __ Branch(&left); 7033 __ Branch(&left);
7040 __ bind(&right_smi); 7034 __ bind(&right_smi);
(...skipping 987 matching lines...)
8028 __ Pop(ra, t1, a1); 8022 __ Pop(ra, t1, a1);
8029 __ Ret(); 8023 __ Ret();
8030 } 8024 }
8031 8025
8032 8026
8033 #undef __ 8027 #undef __
8034 8028
8035 } } // namespace v8::internal 8029 } } // namespace v8::internal
8036 8030
8037 #endif // V8_TARGET_ARCH_MIPS 8031 #endif // V8_TARGET_ARCH_MIPS