Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(547)

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 12391055: Cleaned up CpuFeature scope handling. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: ARM and MIPS support Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after
285 285
286 void MacroAssembler::Move(Register dst, Register src, Condition cond) { 286 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
287 if (!dst.is(src)) { 287 if (!dst.is(src)) {
288 mov(dst, src, LeaveCC, cond); 288 mov(dst, src, LeaveCC, cond);
289 } 289 }
290 } 290 }
291 291
292 292
293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { 293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
294 ASSERT(CpuFeatures::IsSupported(VFP2)); 294 ASSERT(CpuFeatures::IsSupported(VFP2));
295 CpuFeatures::Scope scope(VFP2); 295 CpuFeatureScope scope(this, VFP2);
296 if (!dst.is(src)) { 296 if (!dst.is(src)) {
297 vmov(dst, src); 297 vmov(dst, src);
298 } 298 }
299 } 299 }
300 300
301 301
302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, 302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
303 Condition cond) { 303 Condition cond) {
304 if (!src2.is_reg() && 304 if (!src2.is_reg() &&
305 !src2.must_output_reloc_info(this) && 305 !src2.must_output_reloc_info(this) &&
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after
710 ASSERT(!dst1.is(lr)); // r14. 710 ASSERT(!dst1.is(lr)); // r14.
711 ASSERT_EQ(0, dst1.code() % 2); 711 ASSERT_EQ(0, dst1.code() % 2);
712 ASSERT_EQ(dst1.code() + 1, dst2.code()); 712 ASSERT_EQ(dst1.code() + 1, dst2.code());
713 713
714 // V8 does not use this addressing mode, so the fallback code 714 // V8 does not use this addressing mode, so the fallback code
715 // below doesn't support it yet. 715 // below doesn't support it yet.
716 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); 716 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
717 717
718 // Generate two ldr instructions if ldrd is not available. 718 // Generate two ldr instructions if ldrd is not available.
719 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { 719 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
720 CpuFeatures::Scope scope(ARMv7); 720 CpuFeatureScope scope(this, ARMv7);
721 ldrd(dst1, dst2, src, cond); 721 ldrd(dst1, dst2, src, cond);
722 } else { 722 } else {
723 if ((src.am() == Offset) || (src.am() == NegOffset)) { 723 if ((src.am() == Offset) || (src.am() == NegOffset)) {
724 MemOperand src2(src); 724 MemOperand src2(src);
725 src2.set_offset(src2.offset() + 4); 725 src2.set_offset(src2.offset() + 4);
726 if (dst1.is(src.rn())) { 726 if (dst1.is(src.rn())) {
727 ldr(dst2, src2, cond); 727 ldr(dst2, src2, cond);
728 ldr(dst1, src, cond); 728 ldr(dst1, src, cond);
729 } else { 729 } else {
730 ldr(dst1, src, cond); 730 ldr(dst1, src, cond);
(...skipping 21 matching lines...) Expand all
752 ASSERT(!src1.is(lr)); // r14. 752 ASSERT(!src1.is(lr)); // r14.
753 ASSERT_EQ(0, src1.code() % 2); 753 ASSERT_EQ(0, src1.code() % 2);
754 ASSERT_EQ(src1.code() + 1, src2.code()); 754 ASSERT_EQ(src1.code() + 1, src2.code());
755 755
756 // V8 does not use this addressing mode, so the fallback code 756 // V8 does not use this addressing mode, so the fallback code
757 // below doesn't support it yet. 757 // below doesn't support it yet.
758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); 758 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
759 759
760 // Generate two str instructions if strd is not available. 760 // Generate two str instructions if strd is not available.
761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { 761 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
762 CpuFeatures::Scope scope(ARMv7); 762 CpuFeatureScope scope(this, ARMv7);
763 strd(src1, src2, dst, cond); 763 strd(src1, src2, dst, cond);
764 } else { 764 } else {
765 MemOperand dst2(dst); 765 MemOperand dst2(dst);
766 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { 766 if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
767 dst2.set_offset(dst2.offset() + 4); 767 dst2.set_offset(dst2.offset() + 4);
768 str(src1, dst, cond); 768 str(src1, dst, cond);
769 str(src2, dst2, cond); 769 str(src2, dst2, cond);
770 } else { // PostIndex or NegPostIndex. 770 } else { // PostIndex or NegPostIndex.
771 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); 771 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
772 dst2.set_offset(dst2.offset() - 4); 772 dst2.set_offset(dst2.offset() - 4);
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
806 const Register fpscr_flags, 806 const Register fpscr_flags,
807 const Condition cond) { 807 const Condition cond) {
808 // Compare and load FPSCR. 808 // Compare and load FPSCR.
809 vcmp(src1, src2, cond); 809 vcmp(src1, src2, cond);
810 vmrs(fpscr_flags, cond); 810 vmrs(fpscr_flags, cond);
811 } 811 }
812 812
813 void MacroAssembler::Vmov(const DwVfpRegister dst, 813 void MacroAssembler::Vmov(const DwVfpRegister dst,
814 const double imm, 814 const double imm,
815 const Register scratch) { 815 const Register scratch) {
816 ASSERT(CpuFeatures::IsEnabled(VFP2)); 816 ASSERT(IsEnabled(VFP2));
817 static const DoubleRepresentation minus_zero(-0.0); 817 static const DoubleRepresentation minus_zero(-0.0);
818 static const DoubleRepresentation zero(0.0); 818 static const DoubleRepresentation zero(0.0);
819 DoubleRepresentation value(imm); 819 DoubleRepresentation value(imm);
820 // Handle special values first. 820 // Handle special values first.
821 if (value.bits == zero.bits) { 821 if (value.bits == zero.bits) {
822 vmov(dst, kDoubleRegZero); 822 vmov(dst, kDoubleRegZero);
823 } else if (value.bits == minus_zero.bits) { 823 } else if (value.bits == minus_zero.bits) {
824 vneg(dst, kDoubleRegZero); 824 vneg(dst, kDoubleRegZero);
825 } else { 825 } else {
826 vmov(dst, imm, scratch); 826 vmov(dst, imm, scratch);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
868 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); 868 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
869 869
870 // Save the frame pointer and the context in top. 870 // Save the frame pointer and the context in top.
871 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 871 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
872 str(fp, MemOperand(ip)); 872 str(fp, MemOperand(ip));
873 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 873 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
874 str(cp, MemOperand(ip)); 874 str(cp, MemOperand(ip));
875 875
876 // Optionally save all double registers. 876 // Optionally save all double registers.
877 if (save_doubles) { 877 if (save_doubles) {
878 CpuFeatures::Scope scope(VFP2); 878 CpuFeatureScope scope(this, VFP2);
879 // Check CPU flags for number of registers, setting the Z condition flag. 879 // Check CPU flags for number of registers, setting the Z condition flag.
880 CheckFor32DRegs(ip); 880 CheckFor32DRegs(ip);
881 881
882 // Push registers d0-d15, and possibly d16-d31, on the stack. 882 // Push registers d0-d15, and possibly d16-d31, on the stack.
883 // If d16-d31 are not pushed, decrease the stack pointer instead. 883 // If d16-d31 are not pushed, decrease the stack pointer instead.
884 vstm(db_w, sp, d16, d31, ne); 884 vstm(db_w, sp, d16, d31, ne);
885 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); 885 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
886 vstm(db_w, sp, d0, d15); 886 vstm(db_w, sp, d0, d15);
887 // Note that d0 will be accessible at 887 // Note that d0 will be accessible at
888 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, 888 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
933 // flag. 933 // flag.
934 return FLAG_sim_stack_alignment; 934 return FLAG_sim_stack_alignment;
935 #endif // defined(V8_HOST_ARCH_ARM) 935 #endif // defined(V8_HOST_ARCH_ARM)
936 } 936 }
937 937
938 938
939 void MacroAssembler::LeaveExitFrame(bool save_doubles, 939 void MacroAssembler::LeaveExitFrame(bool save_doubles,
940 Register argument_count) { 940 Register argument_count) {
941 // Optionally restore all double registers. 941 // Optionally restore all double registers.
942 if (save_doubles) { 942 if (save_doubles) {
943 CpuFeatures::Scope scope(VFP2); 943 CpuFeatureScope scope(this, VFP2);
944 // Calculate the stack location of the saved doubles and restore them. 944 // Calculate the stack location of the saved doubles and restore them.
945 const int offset = 2 * kPointerSize; 945 const int offset = 2 * kPointerSize;
946 sub(r3, fp, 946 sub(r3, fp,
947 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); 947 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
948 948
949 // Check CPU flags for number of registers, setting the Z condition flag. 949 // Check CPU flags for number of registers, setting the Z condition flag.
950 CheckFor32DRegs(ip); 950 CheckFor32DRegs(ip);
951 951
952 // Pop registers d0-d15, and possibly d16-d31, from r3. 952 // Pop registers d0-d15, and possibly d16-d31, from r3.
953 // If d16-d31 are not popped, increase r3 instead. 953 // If d16-d31 are not popped, increase r3 instead.
(...skipping 1109 matching lines...) Expand 10 before | Expand all | Expand 10 after
2063 SmiUntag(untagged_value, value_reg); 2063 SmiUntag(untagged_value, value_reg);
2064 FloatingPointHelper::ConvertIntToDouble(this, 2064 FloatingPointHelper::ConvertIntToDouble(this,
2065 untagged_value, 2065 untagged_value,
2066 destination, 2066 destination,
2067 d0, 2067 d0,
2068 mantissa_reg, 2068 mantissa_reg,
2069 exponent_reg, 2069 exponent_reg,
2070 scratch4, 2070 scratch4,
2071 s2); 2071 s2);
2072 if (destination == FloatingPointHelper::kVFPRegisters) { 2072 if (destination == FloatingPointHelper::kVFPRegisters) {
2073 CpuFeatures::Scope scope(VFP2); 2073 CpuFeatureScope scope(this, VFP2);
2074 vstr(d0, scratch1, 0); 2074 vstr(d0, scratch1, 0);
2075 } else { 2075 } else {
2076 str(mantissa_reg, MemOperand(scratch1, 0)); 2076 str(mantissa_reg, MemOperand(scratch1, 0));
2077 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); 2077 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
2078 } 2078 }
2079 bind(&done); 2079 bind(&done);
2080 } 2080 }
2081 2081
2082 2082
2083 void MacroAssembler::CompareMap(Register obj, 2083 void MacroAssembler::CompareMap(Register obj,
(...skipping 345 matching lines...) Expand 10 before | Expand all | Expand 10 after
2429 // Tries to get a signed int32 out of a double precision floating point heap 2429 // Tries to get a signed int32 out of a double precision floating point heap
2430 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the 2430 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
2431 // 32bits signed integer range. 2431 // 32bits signed integer range.
2432 void MacroAssembler::ConvertToInt32(Register source, 2432 void MacroAssembler::ConvertToInt32(Register source,
2433 Register dest, 2433 Register dest,
2434 Register scratch, 2434 Register scratch,
2435 Register scratch2, 2435 Register scratch2,
2436 DwVfpRegister double_scratch, 2436 DwVfpRegister double_scratch,
2437 Label *not_int32) { 2437 Label *not_int32) {
2438 if (CpuFeatures::IsSupported(VFP2)) { 2438 if (CpuFeatures::IsSupported(VFP2)) {
2439 CpuFeatures::Scope scope(VFP2); 2439 CpuFeatureScope scope(this, VFP2);
2440 sub(scratch, source, Operand(kHeapObjectTag)); 2440 sub(scratch, source, Operand(kHeapObjectTag));
2441 vldr(double_scratch, scratch, HeapNumber::kValueOffset); 2441 vldr(double_scratch, scratch, HeapNumber::kValueOffset);
2442 vcvt_s32_f64(double_scratch.low(), double_scratch); 2442 vcvt_s32_f64(double_scratch.low(), double_scratch);
2443 vmov(dest, double_scratch.low()); 2443 vmov(dest, double_scratch.low());
2444 // Signed vcvt instruction will saturate to the minimum (0x80000000) or 2444 // Signed vcvt instruction will saturate to the minimum (0x80000000) or
2445 // maximum (0x7fffffff) signed 32bits integer when the double is out of 2445 // maximum (0x7fffffff) signed 32bits integer when the double is out of
2446 // range. When subtracting one, the minimum signed integer becomes the 2446 // range. When subtracting one, the minimum signed integer becomes the
2447 // maximum signed integer. 2447 // maximum signed integer.
2448 sub(scratch, dest, Operand(1)); 2448 sub(scratch, dest, Operand(1));
2449 cmp(scratch, Operand(LONG_MAX - 1)); 2449 cmp(scratch, Operand(LONG_MAX - 1));
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
2542 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, 2542 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2543 Register result, 2543 Register result,
2544 DwVfpRegister double_input, 2544 DwVfpRegister double_input,
2545 Register scratch, 2545 Register scratch,
2546 DwVfpRegister double_scratch, 2546 DwVfpRegister double_scratch,
2547 CheckForInexactConversion check_inexact) { 2547 CheckForInexactConversion check_inexact) {
2548 ASSERT(!result.is(scratch)); 2548 ASSERT(!result.is(scratch));
2549 ASSERT(!double_input.is(double_scratch)); 2549 ASSERT(!double_input.is(double_scratch));
2550 2550
2551 ASSERT(CpuFeatures::IsSupported(VFP2)); 2551 ASSERT(CpuFeatures::IsSupported(VFP2));
2552 CpuFeatures::Scope scope(VFP2); 2552 CpuFeatureScope scope(this, VFP2);
2553 Register prev_fpscr = result; 2553 Register prev_fpscr = result;
2554 Label done; 2554 Label done;
2555 2555
2556 // Test for values that can be exactly represented as a signed 32-bit integer. 2556 // Test for values that can be exactly represented as a signed 32-bit integer.
2557 TryFastDoubleToInt32(result, double_input, double_scratch, &done); 2557 TryFastDoubleToInt32(result, double_input, double_scratch, &done);
2558 2558
2559 // Convert to integer, respecting rounding mode. 2559 // Convert to integer, respecting rounding mode.
2560 int32_t check_inexact_conversion = 2560 int32_t check_inexact_conversion =
2561 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; 2561 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2562 2562
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
2668 bind(&done); 2668 bind(&done);
2669 } 2669 }
2670 2670
2671 2671
2672 void MacroAssembler::EmitECMATruncate(Register result, 2672 void MacroAssembler::EmitECMATruncate(Register result,
2673 DwVfpRegister double_input, 2673 DwVfpRegister double_input,
2674 DwVfpRegister double_scratch, 2674 DwVfpRegister double_scratch,
2675 Register scratch, 2675 Register scratch,
2676 Register input_high, 2676 Register input_high,
2677 Register input_low) { 2677 Register input_low) {
2678 CpuFeatures::Scope scope(VFP2); 2678 CpuFeatureScope scope(this, VFP2);
2679 ASSERT(!input_high.is(result)); 2679 ASSERT(!input_high.is(result));
2680 ASSERT(!input_low.is(result)); 2680 ASSERT(!input_low.is(result));
2681 ASSERT(!input_low.is(input_high)); 2681 ASSERT(!input_low.is(input_high));
2682 ASSERT(!scratch.is(result) && 2682 ASSERT(!scratch.is(result) &&
2683 !scratch.is(input_high) && 2683 !scratch.is(input_high) &&
2684 !scratch.is(input_low)); 2684 !scratch.is(input_low));
2685 ASSERT(!double_input.is(double_scratch)); 2685 ASSERT(!double_input.is(double_scratch));
2686 2686
2687 Label done; 2687 Label done;
2688 2688
(...skipping 1314 matching lines...) Expand 10 before | Expand all | Expand 10 after
4003 void CodePatcher::EmitCondition(Condition cond) { 4003 void CodePatcher::EmitCondition(Condition cond) {
4004 Instr instr = Assembler::instr_at(masm_.pc_); 4004 Instr instr = Assembler::instr_at(masm_.pc_);
4005 instr = (instr & ~kCondMask) | cond; 4005 instr = (instr & ~kCondMask) | cond;
4006 masm_.emit(instr); 4006 masm_.emit(instr);
4007 } 4007 }
4008 4008
4009 4009
4010 } } // namespace v8::internal 4010 } } // namespace v8::internal
4011 4011
4012 #endif // V8_TARGET_ARCH_ARM 4012 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/lithium-gap-resolver-arm.cc ('k') | src/arm/stub-cache-arm.cc » ('j') | src/assembler.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698