Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2)

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 12391055: Cleaned up CpuFeature scope handling. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fixed nits Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-gap-resolver-arm.cc ('k') | src/arm/stub-cache-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after
285 285
286 void MacroAssembler::Move(Register dst, Register src, Condition cond) { 286 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
287 if (!dst.is(src)) { 287 if (!dst.is(src)) {
288 mov(dst, src, LeaveCC, cond); 288 mov(dst, src, LeaveCC, cond);
289 } 289 }
290 } 290 }
291 291
292 292
293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { 293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
294 ASSERT(CpuFeatures::IsSupported(VFP2)); 294 ASSERT(CpuFeatures::IsSupported(VFP2));
295 CpuFeatures::Scope scope(VFP2); 295 CpuFeatureScope scope(this, VFP2);
296 if (!dst.is(src)) { 296 if (!dst.is(src)) {
297 vmov(dst, src); 297 vmov(dst, src);
298 } 298 }
299 } 299 }
300 300
301 301
302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, 302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
303 Condition cond) { 303 Condition cond) {
304 if (!src2.is_reg() && 304 if (!src2.is_reg() &&
305 !src2.must_output_reloc_info(this) && 305 !src2.must_output_reloc_info(this) &&
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after
710 ASSERT(!dst1.is(lr)); // r14. 710 ASSERT(!dst1.is(lr)); // r14.
711 ASSERT_EQ(0, dst1.code() % 2); 711 ASSERT_EQ(0, dst1.code() % 2);
712 ASSERT_EQ(dst1.code() + 1, dst2.code()); 712 ASSERT_EQ(dst1.code() + 1, dst2.code());
713 713
714 // V8 does not use this addressing mode, so the fallback code 714 // V8 does not use this addressing mode, so the fallback code
715 // below doesn't support it yet. 715 // below doesn't support it yet.
716 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); 716 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
717 717
718 // Generate two ldr instructions if ldrd is not available. 718 // Generate two ldr instructions if ldrd is not available.
719 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { 719 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
720 CpuFeatures::Scope scope(ARMv7); 720 CpuFeatureScope scope(this, ARMv7);
721 ldrd(dst1, dst2, src, cond); 721 ldrd(dst1, dst2, src, cond);
722 } else { 722 } else {
723 if ((src.am() == Offset) || (src.am() == NegOffset)) { 723 if ((src.am() == Offset) || (src.am() == NegOffset)) {
724 MemOperand src2(src); 724 MemOperand src2(src);
725 src2.set_offset(src2.offset() + 4); 725 src2.set_offset(src2.offset() + 4);
726 if (dst1.is(src.rn())) { 726 if (dst1.is(src.rn())) {
727 ldr(dst2, src2, cond); 727 ldr(dst2, src2, cond);
728 ldr(dst1, src, cond); 728 ldr(dst1, src, cond);
729 } else { 729 } else {
730 ldr(dst1, src, cond); 730 ldr(dst1, src, cond);
(...skipping 21 matching lines...) Expand all
752 ASSERT(!src1.is(lr)); // r14. 752 ASSERT(!src1.is(lr)); // r14.
753 ASSERT_EQ(0, src1.code() % 2); 753 ASSERT_EQ(0, src1.code() % 2);
754 ASSERT_EQ(src1.code() + 1, src2.code()); 754 ASSERT_EQ(src1.code() + 1, src2.code());
755 755
756 // V8 does not use this addressing mode, so the fallback code 756 // V8 does not use this addressing mode, so the fallback code
757 // below doesn't support it yet. 757 // below doesn't support it yet.
758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); 758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
759 759
760 // Generate two str instructions if strd is not available. 760 // Generate two str instructions if strd is not available.
761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { 761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
762 CpuFeatures::Scope scope(ARMv7); 762 CpuFeatureScope scope(this, ARMv7);
763 strd(src1, src2, dst, cond); 763 strd(src1, src2, dst, cond);
764 } else { 764 } else {
765 MemOperand dst2(dst); 765 MemOperand dst2(dst);
766 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { 766 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
767 dst2.set_offset(dst2.offset() + 4); 767 dst2.set_offset(dst2.offset() + 4);
768 str(src1, dst, cond); 768 str(src1, dst, cond);
769 str(src2, dst2, cond); 769 str(src2, dst2, cond);
770 } else { // PostIndex or NegPostIndex. 770 } else { // PostIndex or NegPostIndex.
771 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); 771 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
772 dst2.set_offset(dst2.offset() - 4); 772 dst2.set_offset(dst2.offset() - 4);
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
806 const Register fpscr_flags, 806 const Register fpscr_flags,
807 const Condition cond) { 807 const Condition cond) {
808 // Compare and load FPSCR. 808 // Compare and load FPSCR.
809 vcmp(src1, src2, cond); 809 vcmp(src1, src2, cond);
810 vmrs(fpscr_flags, cond); 810 vmrs(fpscr_flags, cond);
811 } 811 }
812 812
813 void MacroAssembler::Vmov(const DwVfpRegister dst, 813 void MacroAssembler::Vmov(const DwVfpRegister dst,
814 const double imm, 814 const double imm,
815 const Register scratch) { 815 const Register scratch) {
816 ASSERT(CpuFeatures::IsEnabled(VFP2)); 816 ASSERT(IsEnabled(VFP2));
817 static const DoubleRepresentation minus_zero(-0.0); 817 static const DoubleRepresentation minus_zero(-0.0);
818 static const DoubleRepresentation zero(0.0); 818 static const DoubleRepresentation zero(0.0);
819 DoubleRepresentation value(imm); 819 DoubleRepresentation value(imm);
820 // Handle special values first. 820 // Handle special values first.
821 if (value.bits == zero.bits) { 821 if (value.bits == zero.bits) {
822 vmov(dst, kDoubleRegZero); 822 vmov(dst, kDoubleRegZero);
823 } else if (value.bits == minus_zero.bits) { 823 } else if (value.bits == minus_zero.bits) {
824 vneg(dst, kDoubleRegZero); 824 vneg(dst, kDoubleRegZero);
825 } else { 825 } else {
826 vmov(dst, imm, scratch); 826 vmov(dst, imm, scratch);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
868 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); 868 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
869 869
870 // Save the frame pointer and the context in top. 870 // Save the frame pointer and the context in top.
871 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 871 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
872 str(fp, MemOperand(ip)); 872 str(fp, MemOperand(ip));
873 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 873 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
874 str(cp, MemOperand(ip)); 874 str(cp, MemOperand(ip));
875 875
876 // Optionally save all double registers. 876 // Optionally save all double registers.
877 if (save_doubles) { 877 if (save_doubles) {
878 CpuFeatures::Scope scope(VFP2); 878 CpuFeatureScope scope(this, VFP2);
879 // Check CPU flags for number of registers, setting the Z condition flag. 879 // Check CPU flags for number of registers, setting the Z condition flag.
880 CheckFor32DRegs(ip); 880 CheckFor32DRegs(ip);
881 881
882 // Push registers d0-d15, and possibly d16-d31, on the stack. 882 // Push registers d0-d15, and possibly d16-d31, on the stack.
883 // If d16-d31 are not pushed, decrease the stack pointer instead. 883 // If d16-d31 are not pushed, decrease the stack pointer instead.
884 vstm(db_w, sp, d16, d31, ne); 884 vstm(db_w, sp, d16, d31, ne);
885 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); 885 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
886 vstm(db_w, sp, d0, d15); 886 vstm(db_w, sp, d0, d15);
887 // Note that d0 will be accessible at 887 // Note that d0 will be accessible at
888 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, 888 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
933 // flag. 933 // flag.
934 return FLAG_sim_stack_alignment; 934 return FLAG_sim_stack_alignment;
935 #endif // defined(V8_HOST_ARCH_ARM) 935 #endif // defined(V8_HOST_ARCH_ARM)
936 } 936 }
937 937
938 938
939 void MacroAssembler::LeaveExitFrame(bool save_doubles, 939 void MacroAssembler::LeaveExitFrame(bool save_doubles,
940 Register argument_count) { 940 Register argument_count) {
941 // Optionally restore all double registers. 941 // Optionally restore all double registers.
942 if (save_doubles) { 942 if (save_doubles) {
943 CpuFeatures::Scope scope(VFP2); 943 CpuFeatureScope scope(this, VFP2);
944 // Calculate the stack location of the saved doubles and restore them. 944 // Calculate the stack location of the saved doubles and restore them.
945 const int offset = 2 * kPointerSize; 945 const int offset = 2 * kPointerSize;
946 sub(r3, fp, 946 sub(r3, fp,
947 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); 947 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
948 948
949 // Check CPU flags for number of registers, setting the Z condition flag. 949 // Check CPU flags for number of registers, setting the Z condition flag.
950 CheckFor32DRegs(ip); 950 CheckFor32DRegs(ip);
951 951
952 // Pop registers d0-d15, and possibly d16-d31, from r3. 952 // Pop registers d0-d15, and possibly d16-d31, from r3.
953 // If d16-d31 are not popped, increase r3 instead. 953 // If d16-d31 are not popped, increase r3 instead.
(...skipping 1119 matching lines...) Expand 10 before | Expand all | Expand 10 after
2073 SmiUntag(untagged_value, value_reg); 2073 SmiUntag(untagged_value, value_reg);
2074 FloatingPointHelper::ConvertIntToDouble(this, 2074 FloatingPointHelper::ConvertIntToDouble(this,
2075 untagged_value, 2075 untagged_value,
2076 destination, 2076 destination,
2077 d0, 2077 d0,
2078 mantissa_reg, 2078 mantissa_reg,
2079 exponent_reg, 2079 exponent_reg,
2080 scratch4, 2080 scratch4,
2081 s2); 2081 s2);
2082 if (destination == FloatingPointHelper::kVFPRegisters) { 2082 if (destination == FloatingPointHelper::kVFPRegisters) {
2083 CpuFeatures::Scope scope(VFP2); 2083 CpuFeatureScope scope(this, VFP2);
2084 vstr(d0, scratch1, 0); 2084 vstr(d0, scratch1, 0);
2085 } else { 2085 } else {
2086 str(mantissa_reg, MemOperand(scratch1, 0)); 2086 str(mantissa_reg, MemOperand(scratch1, 0));
2087 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); 2087 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
2088 } 2088 }
2089 bind(&done); 2089 bind(&done);
2090 } 2090 }
2091 2091
2092 2092
2093 void MacroAssembler::CompareMap(Register obj, 2093 void MacroAssembler::CompareMap(Register obj,
(...skipping 345 matching lines...) Expand 10 before | Expand all | Expand 10 after
2439 // Tries to get a signed int32 out of a double precision floating point heap 2439 // Tries to get a signed int32 out of a double precision floating point heap
2440 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the 2440 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
2441 // 32bits signed integer range. 2441 // 32bits signed integer range.
2442 void MacroAssembler::ConvertToInt32(Register source, 2442 void MacroAssembler::ConvertToInt32(Register source,
2443 Register dest, 2443 Register dest,
2444 Register scratch, 2444 Register scratch,
2445 Register scratch2, 2445 Register scratch2,
2446 DwVfpRegister double_scratch, 2446 DwVfpRegister double_scratch,
2447 Label *not_int32) { 2447 Label *not_int32) {
2448 if (CpuFeatures::IsSupported(VFP2)) { 2448 if (CpuFeatures::IsSupported(VFP2)) {
2449 CpuFeatures::Scope scope(VFP2); 2449 CpuFeatureScope scope(this, VFP2);
2450 sub(scratch, source, Operand(kHeapObjectTag)); 2450 sub(scratch, source, Operand(kHeapObjectTag));
2451 vldr(double_scratch, scratch, HeapNumber::kValueOffset); 2451 vldr(double_scratch, scratch, HeapNumber::kValueOffset);
2452 vcvt_s32_f64(double_scratch.low(), double_scratch); 2452 vcvt_s32_f64(double_scratch.low(), double_scratch);
2453 vmov(dest, double_scratch.low()); 2453 vmov(dest, double_scratch.low());
2454 // Signed vcvt instruction will saturate to the minimum (0x80000000) or 2454 // Signed vcvt instruction will saturate to the minimum (0x80000000) or
2455 // maximum (0x7fffffff) signed 32bits integer when the double is out of 2455 // maximum (0x7fffffff) signed 32bits integer when the double is out of
2456 // range. When subtracting one, the minimum signed integer becomes the 2456 // range. When subtracting one, the minimum signed integer becomes the
2457 // maximum signed integer. 2457 // maximum signed integer.
2458 sub(scratch, dest, Operand(1)); 2458 sub(scratch, dest, Operand(1));
2459 cmp(scratch, Operand(LONG_MAX - 1)); 2459 cmp(scratch, Operand(LONG_MAX - 1));
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
2552 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, 2552 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2553 Register result, 2553 Register result,
2554 DwVfpRegister double_input, 2554 DwVfpRegister double_input,
2555 Register scratch, 2555 Register scratch,
2556 DwVfpRegister double_scratch, 2556 DwVfpRegister double_scratch,
2557 CheckForInexactConversion check_inexact) { 2557 CheckForInexactConversion check_inexact) {
2558 ASSERT(!result.is(scratch)); 2558 ASSERT(!result.is(scratch));
2559 ASSERT(!double_input.is(double_scratch)); 2559 ASSERT(!double_input.is(double_scratch));
2560 2560
2561 ASSERT(CpuFeatures::IsSupported(VFP2)); 2561 ASSERT(CpuFeatures::IsSupported(VFP2));
2562 CpuFeatures::Scope scope(VFP2); 2562 CpuFeatureScope scope(this, VFP2);
2563 Register prev_fpscr = result; 2563 Register prev_fpscr = result;
2564 Label done; 2564 Label done;
2565 2565
2566 // Test for values that can be exactly represented as a signed 32-bit integer. 2566 // Test for values that can be exactly represented as a signed 32-bit integer.
2567 TryFastDoubleToInt32(result, double_input, double_scratch, &done); 2567 TryFastDoubleToInt32(result, double_input, double_scratch, &done);
2568 2568
2569 // Convert to integer, respecting rounding mode. 2569 // Convert to integer, respecting rounding mode.
2570 int32_t check_inexact_conversion = 2570 int32_t check_inexact_conversion =
2571 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; 2571 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2572 2572
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
2678 bind(&done); 2678 bind(&done);
2679 } 2679 }
2680 2680
2681 2681
2682 void MacroAssembler::EmitECMATruncate(Register result, 2682 void MacroAssembler::EmitECMATruncate(Register result,
2683 DwVfpRegister double_input, 2683 DwVfpRegister double_input,
2684 DwVfpRegister double_scratch, 2684 DwVfpRegister double_scratch,
2685 Register scratch, 2685 Register scratch,
2686 Register input_high, 2686 Register input_high,
2687 Register input_low) { 2687 Register input_low) {
2688 CpuFeatures::Scope scope(VFP2); 2688 CpuFeatureScope scope(this, VFP2);
2689 ASSERT(!input_high.is(result)); 2689 ASSERT(!input_high.is(result));
2690 ASSERT(!input_low.is(result)); 2690 ASSERT(!input_low.is(result));
2691 ASSERT(!input_low.is(input_high)); 2691 ASSERT(!input_low.is(input_high));
2692 ASSERT(!scratch.is(result) && 2692 ASSERT(!scratch.is(result) &&
2693 !scratch.is(input_high) && 2693 !scratch.is(input_high) &&
2694 !scratch.is(input_low)); 2694 !scratch.is(input_low));
2695 ASSERT(!double_input.is(double_scratch)); 2695 ASSERT(!double_input.is(double_scratch));
2696 2696
2697 Label done; 2697 Label done;
2698 2698
(...skipping 1328 matching lines...) Expand 10 before | Expand all | Expand 10 after
4027 void CodePatcher::EmitCondition(Condition cond) { 4027 void CodePatcher::EmitCondition(Condition cond) {
4028 Instr instr = Assembler::instr_at(masm_.pc_); 4028 Instr instr = Assembler::instr_at(masm_.pc_);
4029 instr = (instr & ~kCondMask) | cond; 4029 instr = (instr & ~kCondMask) | cond;
4030 masm_.emit(instr); 4030 masm_.emit(instr);
4031 } 4031 }
4032 4032
4033 4033
4034 } } // namespace v8::internal 4034 } } // namespace v8::internal
4035 4035
4036 #endif // V8_TARGET_ARCH_ARM 4036 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/lithium-gap-resolver-arm.cc ('k') | src/arm/stub-cache-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698