OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 247 matching lines...)
258 | 258 |
259 | 259 |
260 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | 260 void MacroAssembler::Move(Register dst, Register src, Condition cond) { |
261 if (!dst.is(src)) { | 261 if (!dst.is(src)) { |
262 mov(dst, src, LeaveCC, cond); | 262 mov(dst, src, LeaveCC, cond); |
263 } | 263 } |
264 } | 264 } |
265 | 265 |
266 | 266 |
267 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { | 267 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { |
268 ASSERT(CpuFeatures::IsSupported(VFP3)); | 268 ASSERT(CpuFeatures::IsSupported(VFP2)); |
269 CpuFeatures::Scope scope(VFP3); | 269 CpuFeatures::Scope scope(VFP2); |
270 if (!dst.is(src)) { | 270 if (!dst.is(src)) { |
271 vmov(dst, src); | 271 vmov(dst, src); |
272 } | 272 } |
273 } | 273 } |
274 | 274 |
275 | 275 |
276 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 276 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, |
277 Condition cond) { | 277 Condition cond) { |
278 if (!src2.is_reg() && | 278 if (!src2.is_reg() && |
279 !src2.must_use_constant_pool() && | 279 !src2.must_use_constant_pool() && |
(...skipping 491 matching lines...)
771 const Register fpscr_flags, | 771 const Register fpscr_flags, |
772 const Condition cond) { | 772 const Condition cond) { |
773 // Compare and load FPSCR. | 773 // Compare and load FPSCR. |
774 vcmp(src1, src2, cond); | 774 vcmp(src1, src2, cond); |
775 vmrs(fpscr_flags, cond); | 775 vmrs(fpscr_flags, cond); |
776 } | 776 } |
777 | 777 |
778 void MacroAssembler::Vmov(const DwVfpRegister dst, | 778 void MacroAssembler::Vmov(const DwVfpRegister dst, |
779 const double imm, | 779 const double imm, |
780 const Condition cond) { | 780 const Condition cond) { |
781 ASSERT(CpuFeatures::IsEnabled(VFP3)); | 781 ASSERT(CpuFeatures::IsEnabled(VFP2)); |
782 static const DoubleRepresentation minus_zero(-0.0); | 782 static const DoubleRepresentation minus_zero(-0.0); |
783 static const DoubleRepresentation zero(0.0); | 783 static const DoubleRepresentation zero(0.0); |
784 DoubleRepresentation value(imm); | 784 DoubleRepresentation value(imm); |
785 // Handle special values first. | 785 // Handle special values first. |
786 if (value.bits == zero.bits) { | 786 if (value.bits == zero.bits) { |
787 vmov(dst, kDoubleRegZero, cond); | 787 vmov(dst, kDoubleRegZero, cond); |
788 } else if (value.bits == minus_zero.bits) { | 788 } else if (value.bits == minus_zero.bits) { |
789 vneg(dst, kDoubleRegZero, cond); | 789 vneg(dst, kDoubleRegZero, cond); |
790 } else { | 790 } else { |
791 vmov(dst, imm, cond); | 791 vmov(dst, imm, cond); |
(...skipping 131 matching lines...)
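A note on the +/-0.0 handling in Vmov above: IEEE-754 comparison treats 0.0 and -0.0 as equal, which is why the code compares DoubleRepresentation bit patterns instead of the values themselves before falling back to a vmov immediate. A minimal standalone sketch of that distinction (hypothetical helper name, plain C++):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Reinterpret a double's 64-bit encoding, roughly what DoubleRepresentation does.
  static uint64_t DoubleBits(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return bits;
  }

  int main() {
    assert(0.0 == -0.0);                           // FP compare cannot tell them apart.
    assert(DoubleBits(0.0) != DoubleBits(-0.0));   // Bit patterns differ in the sign bit.
    assert(DoubleBits(-0.0) == 0x8000000000000000ULL);
    return 0;
  }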
923 | 923 |
924 // Tear down the exit frame, pop the arguments, and return. | 924 // Tear down the exit frame, pop the arguments, and return. |
925 mov(sp, Operand(fp)); | 925 mov(sp, Operand(fp)); |
926 ldm(ia_w, sp, fp.bit() | lr.bit()); | 926 ldm(ia_w, sp, fp.bit() | lr.bit()); |
927 if (argument_count.is_valid()) { | 927 if (argument_count.is_valid()) { |
928 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); | 928 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); |
929 } | 929 } |
930 } | 930 } |
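For the argument pop at the end of LeaveExitFrame above, the shift by kPointerSizeLog2 simply converts a count of stack slots into a byte offset. A tiny worked example, assuming 32-bit pointers (kPointerSizeLog2 == 2):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPointerSizeLog2 = 2;      // log2(sizeof(void*)) on 32-bit ARM
    uint32_t sp = 0x8000;                // sp after the ldm restored fp and lr
    uint32_t argument_count = 3;         // three argument slots left on the stack
    sp += argument_count << kPointerSizeLog2;
    assert(sp == 0x8000 + 3 * 4);        // 12 bytes popped
    return 0;
  }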
931 | 931 |
932 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { | 932 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { |
| 933 ASSERT(CpuFeatures::IsSupported(VFP2)); |
933 if (use_eabi_hardfloat()) { | 934 if (use_eabi_hardfloat()) { |
934 Move(dst, d0); | 935 Move(dst, d0); |
935 } else { | 936 } else { |
936 vmov(dst, r0, r1); | 937 vmov(dst, r0, r1); |
937 } | 938 } |
938 } | 939 } |
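On the soft-float path of GetCFunctionDoubleResult above, the double result comes back split across r0/r1 and vmov(dst, r0, r1) reassembles it. A sketch of that word split, assuming the AAPCS little-endian soft-float convention (low word in r0, high word in r1):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    double result = 3.5;
    uint64_t bits;
    std::memcpy(&bits, &result, sizeof(bits));
    uint32_t r0 = static_cast<uint32_t>(bits);        // low 32 bits, as returned in r0
    uint32_t r1 = static_cast<uint32_t>(bits >> 32);  // high 32 bits, as returned in r1
    uint64_t rebuilt = (static_cast<uint64_t>(r1) << 32) | r0;
    double d;
    std::memcpy(&d, &rebuilt, sizeof(d));
    assert(d == 3.5);                                 // what vmov(dst, r0, r1) reconstitutes
    return 0;
  }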
939 | 940 |
940 | 941 |
941 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { | 942 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { |
942 // This macro takes the dst register to make the code more readable | 943 // This macro takes the dst register to make the code more readable |
(...skipping 1017 matching lines...)
1960 jmp(&have_double_value); | 1961 jmp(&have_double_value); |
1961 | 1962 |
1962 bind(&smi_value); | 1963 bind(&smi_value); |
1963 add(scratch1, elements_reg, | 1964 add(scratch1, elements_reg, |
1964 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 1965 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
1965 add(scratch1, scratch1, | 1966 add(scratch1, scratch1, |
1966 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | 1967 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
1967 // scratch1 is now the effective address of the double element | 1968 // scratch1 is now the effective address of the double element |
1968 | 1969 |
1969 FloatingPointHelper::Destination destination; | 1970 FloatingPointHelper::Destination destination; |
1970 if (CpuFeatures::IsSupported(VFP3)) { | 1971 if (CpuFeatures::IsSupported(VFP2)) { |
1971 destination = FloatingPointHelper::kVFPRegisters; | 1972 destination = FloatingPointHelper::kVFPRegisters; |
1972 } else { | 1973 } else { |
1973 destination = FloatingPointHelper::kCoreRegisters; | 1974 destination = FloatingPointHelper::kCoreRegisters; |
1974 } | 1975 } |
1975 | 1976 |
1976 Register untagged_value = receiver_reg; | 1977 Register untagged_value = receiver_reg; |
1977 SmiUntag(untagged_value, value_reg); | 1978 SmiUntag(untagged_value, value_reg); |
1978 FloatingPointHelper::ConvertIntToDouble(this, | 1979 FloatingPointHelper::ConvertIntToDouble(this, |
1979 untagged_value, | 1980 untagged_value, |
1980 destination, | 1981 destination, |
1981 d0, | 1982 d0, |
1982 mantissa_reg, | 1983 mantissa_reg, |
1983 exponent_reg, | 1984 exponent_reg, |
1984 scratch4, | 1985 scratch4, |
1985 s2); | 1986 s2); |
1986 if (destination == FloatingPointHelper::kVFPRegisters) { | 1987 if (destination == FloatingPointHelper::kVFPRegisters) { |
1987 CpuFeatures::Scope scope(VFP3); | 1988 CpuFeatures::Scope scope(VFP2); |
1988 vstr(d0, scratch1, 0); | 1989 vstr(d0, scratch1, 0); |
1989 } else { | 1990 } else { |
1990 str(mantissa_reg, MemOperand(scratch1, 0)); | 1991 str(mantissa_reg, MemOperand(scratch1, 0)); |
1991 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); | 1992 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); |
1992 } | 1993 } |
1993 bind(&done); | 1994 bind(&done); |
1994 } | 1995 } |
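The address arithmetic in the smi path above leans on the 32-bit tagging scheme: a smi is the value shifted left by kSmiTagSize, so shifting the tagged key by (kDoubleSizeLog2 - kSmiTagSize) yields the byte offset of an 8-byte element, and subtracting kHeapObjectTag untags the array pointer. A worked example with the constants assumed from the 32-bit configuration (kSmiTagSize == 1, kDoubleSizeLog2 == 3, kHeapObjectTag == 1; the header size is a stand-in):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kSmiTagSize = 1;
    const int kDoubleSizeLog2 = 3;
    const int kHeapObjectTag = 1;
    const int kHeaderSize = 8;            // stand-in for FixedDoubleArray::kHeaderSize

    int index = 5;
    uint32_t key_reg = index << kSmiTagSize;                       // smi-tagged key
    uint32_t offset = key_reg << (kDoubleSizeLog2 - kSmiTagSize);  // == index * 8
    assert(offset == static_cast<uint32_t>(index) * 8);

    uint32_t elements_reg = 0x1000 + kHeapObjectTag;               // tagged array pointer
    uint32_t scratch1 = elements_reg + (kHeaderSize - kHeapObjectTag) + offset;
    assert(scratch1 == 0x1000u + kHeaderSize + index * 8);         // untagged element address
    return 0;
  }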
1995 | 1996 |
1996 | 1997 |
1997 void MacroAssembler::CompareMap(Register obj, | 1998 void MacroAssembler::CompareMap(Register obj, |
(...skipping 326 matching lines...)
2324 | 2325 |
2325 // Tries to get a signed int32 out of a double precision floating point heap | 2326 // Tries to get a signed int32 out of a double precision floating point heap |
2326 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the | 2327 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the |
2327 // 32bits signed integer range. | 2328 // 32bits signed integer range. |
2328 void MacroAssembler::ConvertToInt32(Register source, | 2329 void MacroAssembler::ConvertToInt32(Register source, |
2329 Register dest, | 2330 Register dest, |
2330 Register scratch, | 2331 Register scratch, |
2331 Register scratch2, | 2332 Register scratch2, |
2332 DwVfpRegister double_scratch, | 2333 DwVfpRegister double_scratch, |
2333 Label *not_int32) { | 2334 Label *not_int32) { |
2334 if (CpuFeatures::IsSupported(VFP3)) { | 2335 if (CpuFeatures::IsSupported(VFP2)) { |
2335 CpuFeatures::Scope scope(VFP3); | 2336 CpuFeatures::Scope scope(VFP2); |
2336 sub(scratch, source, Operand(kHeapObjectTag)); | 2337 sub(scratch, source, Operand(kHeapObjectTag)); |
2337 vldr(double_scratch, scratch, HeapNumber::kValueOffset); | 2338 vldr(double_scratch, scratch, HeapNumber::kValueOffset); |
2338 vcvt_s32_f64(double_scratch.low(), double_scratch); | 2339 vcvt_s32_f64(double_scratch.low(), double_scratch); |
2339 vmov(dest, double_scratch.low()); | 2340 vmov(dest, double_scratch.low()); |
2340 // Signed vcvt instruction will saturate to the minimum (0x80000000) or | 2341 // Signed vcvt instruction will saturate to the minimum (0x80000000) or |
2341 // maximum (0x7fffffff) signed 32-bit integer when the double is out of | 2342 // maximum (0x7fffffff) signed 32-bit integer when the double is out of |
2342 // range. When subtracting one, the minimum signed integer becomes the | 2343 // range. When subtracting one, the minimum signed integer becomes the |
2343 // maximum signed integer. | 2344 // maximum signed integer. |
2344 sub(scratch, dest, Operand(1)); | 2345 sub(scratch, dest, Operand(1)); |
2345 cmp(scratch, Operand(LONG_MAX - 1)); | 2346 cmp(scratch, Operand(LONG_MAX - 1)); |
(...skipping 74 matching lines...)
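The out-of-range check in ConvertToInt32 relies on the saturating behaviour described in the comment: after subtracting one, both saturated results land in [INT32_MAX - 1, INT32_MAX], so a single signed compare against LONG_MAX - 1 (LONG_MAX == INT32_MAX on 32-bit ARM) catches them; the branch that consumes the flags is in the elided lines. A sketch of that check in plain C++:

  #include <cassert>
  #include <cstdint>

  // Mirrors "sub(scratch, dest, Operand(1)); cmp(scratch, Operand(LONG_MAX - 1))"
  // followed by a signed >= branch. The unsigned subtraction avoids signed-overflow
  // UB; the narrowing cast is the usual two's-complement wrap.
  static bool MightBeOutOfRange(int32_t dest) {
    int32_t scratch = static_cast<int32_t>(static_cast<uint32_t>(dest) - 1u);
    return scratch >= INT32_MAX - 1;
  }

  int main() {
    assert(MightBeOutOfRange(INT32_MIN));       // saturated minimum
    assert(MightBeOutOfRange(INT32_MAX));       // saturated maximum
    assert(!MightBeOutOfRange(0));
    assert(!MightBeOutOfRange(INT32_MAX - 1));  // largest value accepted as in range
    return 0;
  }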
2420 } | 2421 } |
2421 } | 2422 } |
2422 | 2423 |
2423 | 2424 |
2424 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, | 2425 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, |
2425 SwVfpRegister result, | 2426 SwVfpRegister result, |
2426 DwVfpRegister double_input, | 2427 DwVfpRegister double_input, |
2427 Register scratch1, | 2428 Register scratch1, |
2428 Register scratch2, | 2429 Register scratch2, |
2429 CheckForInexactConversion check_inexact) { | 2430 CheckForInexactConversion check_inexact) { |
2430 ASSERT(CpuFeatures::IsSupported(VFP3)); | 2431 ASSERT(CpuFeatures::IsSupported(VFP2)); |
2431 CpuFeatures::Scope scope(VFP3); | 2432 CpuFeatures::Scope scope(VFP2); |
2432 Register prev_fpscr = scratch1; | 2433 Register prev_fpscr = scratch1; |
2433 Register scratch = scratch2; | 2434 Register scratch = scratch2; |
2434 | 2435 |
2435 int32_t check_inexact_conversion = | 2436 int32_t check_inexact_conversion = |
2436 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; | 2437 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; |
2437 | 2438 |
2438 // Set custom FPSCR: | 2439 // Set custom FPSCR: |
2439 // - Set rounding mode. | 2440 // - Set rounding mode. |
2440 // - Clear vfp cumulative exception flags. | 2441 // - Clear vfp cumulative exception flags. |
2441 // - Make sure Flush-to-zero mode control bit is unset. | 2442 // - Make sure Flush-to-zero mode control bit is unset. |
(...skipping 97 matching lines...)
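The FPSCR setup described in the comment above boils down to mask-and-or bit manipulation on the status register value read with vmrs (the vmsr and the conversion itself are in the elided lines). A conceptual sketch only; the mask names and bit positions are placeholders for the kVFP* constants defined elsewhere in the assembler:

  #include <cstdint>

  // Build the FPSCR value to install: new rounding mode, cumulative exception
  // flags cleared, flush-to-zero forced off.
  static uint32_t BuildFpscr(uint32_t prev_fpscr,
                             uint32_t rounding_mode_bits,
                             uint32_t kRoundingModeMask,
                             uint32_t kExceptionFlagsMask,
                             uint32_t kFlushToZeroBit) {
    uint32_t fpscr = prev_fpscr;
    fpscr &= ~(kRoundingModeMask | kExceptionFlagsMask | kFlushToZeroBit);
    fpscr |= rounding_mode_bits;
    return fpscr;
  }

  int main() {
    // Illustrative layout: rounding mode in bits 23:22, cumulative exception
    // flags in bits 4:0, flush-to-zero in bit 24.
    uint32_t fpscr = BuildFpscr(0x0100001Fu, 0x00400000u,
                                0x00C00000u, 0x0000001Fu, 0x01000000u);
    return fpscr == 0x00400000u ? 0 : 1;
  }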
2539 bind(&done); | 2540 bind(&done); |
2540 } | 2541 } |
2541 | 2542 |
2542 | 2543 |
2543 void MacroAssembler::EmitECMATruncate(Register result, | 2544 void MacroAssembler::EmitECMATruncate(Register result, |
2544 DwVfpRegister double_input, | 2545 DwVfpRegister double_input, |
2545 SwVfpRegister single_scratch, | 2546 SwVfpRegister single_scratch, |
2546 Register scratch, | 2547 Register scratch, |
2547 Register input_high, | 2548 Register input_high, |
2548 Register input_low) { | 2549 Register input_low) { |
2549 CpuFeatures::Scope scope(VFP3); | 2550 CpuFeatures::Scope scope(VFP2); |
2550 ASSERT(!input_high.is(result)); | 2551 ASSERT(!input_high.is(result)); |
2551 ASSERT(!input_low.is(result)); | 2552 ASSERT(!input_low.is(result)); |
2552 ASSERT(!input_low.is(input_high)); | 2553 ASSERT(!input_low.is(input_high)); |
2553 ASSERT(!scratch.is(result) && | 2554 ASSERT(!scratch.is(result) && |
2554 !scratch.is(input_high) && | 2555 !scratch.is(input_high) && |
2555 !scratch.is(input_low)); | 2556 !scratch.is(input_low)); |
2556 ASSERT(!single_scratch.is(double_input.low()) && | 2557 ASSERT(!single_scratch.is(double_input.low()) && |
2557 !single_scratch.is(double_input.high())); | 2558 !single_scratch.is(double_input.high())); |
2558 | 2559 |
2559 Label done; | 2560 Label done; |
(...skipping 765 matching lines...)
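For context on EmitECMATruncate, a sketch of the spec behaviour it targets (not of the emitted code): ECMA-262 ToInt32 truncates toward zero and wraps modulo 2^32, unlike the saturating vcvt path in ConvertToInt32 above.

  #include <cassert>
  #include <cmath>
  #include <cstdint>

  static int32_t EcmaToInt32(double d) {
    if (!std::isfinite(d) || d == 0) return 0;          // NaN, +/-Inf, +/-0 -> 0
    double truncated = std::trunc(d);                   // round toward zero
    double modulo = std::fmod(truncated, 4294967296.0); // into (-2^32, 2^32)
    if (modulo < 0) modulo += 4294967296.0;             // into [0, 2^32)
    if (modulo >= 2147483648.0) modulo -= 4294967296.0; // into [-2^31, 2^31)
    return static_cast<int32_t>(modulo);
  }

  int main() {
    assert(EcmaToInt32(3.9) == 3);
    assert(EcmaToInt32(-3.9) == -3);
    assert(EcmaToInt32(4294967296.0 + 5.0) == 5);       // wraps modulo 2^32
    assert(EcmaToInt32(2147483648.0) == INT32_MIN);     // 2^31 wraps to -2^31
    return 0;
  }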
3325 } | 3326 } |
3326 | 3327 |
3327 | 3328 |
3328 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3329 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3329 Register scratch) { | 3330 Register scratch) { |
3330 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3331 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3331 } | 3332 } |
3332 | 3333 |
3333 | 3334 |
3334 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { | 3335 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { |
| 3336 ASSERT(CpuFeatures::IsSupported(VFP2)); |
3335 if (use_eabi_hardfloat()) { | 3337 if (use_eabi_hardfloat()) { |
3336 Move(d0, dreg); | 3338 Move(d0, dreg); |
3337 } else { | 3339 } else { |
3338 vmov(r0, r1, dreg); | 3340 vmov(r0, r1, dreg); |
3339 } | 3341 } |
3340 } | 3342 } |
3341 | 3343 |
3342 | 3344 |
3343 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, | 3345 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, |
3344 DoubleRegister dreg2) { | 3346 DoubleRegister dreg2) { |
| 3347 ASSERT(CpuFeatures::IsSupported(VFP2)); |
3345 if (use_eabi_hardfloat()) { | 3348 if (use_eabi_hardfloat()) { |
3346 if (dreg2.is(d0)) { | 3349 if (dreg2.is(d0)) { |
3347 ASSERT(!dreg1.is(d1)); | 3350 ASSERT(!dreg1.is(d1)); |
3348 Move(d1, dreg2); | 3351 Move(d1, dreg2); |
3349 Move(d0, dreg1); | 3352 Move(d0, dreg1); |
3350 } else { | 3353 } else { |
3351 Move(d0, dreg1); | 3354 Move(d0, dreg1); |
3352 Move(d1, dreg2); | 3355 Move(d1, dreg2); |
3353 } | 3356 } |
3354 } else { | 3357 } else { |
3355 vmov(r0, r1, dreg1); | 3358 vmov(r0, r1, dreg1); |
3356 vmov(r2, r3, dreg2); | 3359 vmov(r2, r3, dreg2); |
3357 } | 3360 } |
3358 } | 3361 } |
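The dreg2.is(d0) special case above exists because copying in the naive order would clobber the second source before it is read. A small illustration with plain structs standing in for the double registers (hypothetical names):

  #include <cassert>

  struct Reg { double value; };
  static void Move(Reg& dst, const Reg& src) { dst.value = src.value; }

  int main() {
    Reg d0{1.0}, d1{0.0}, d2{2.0};
    Reg& dreg1 = d2;   // first double argument, currently in d2
    Reg& dreg2 = d0;   // second double argument, already sitting in d0
    // Naive order -- Move(d0, dreg1); Move(d1, dreg2); -- would overwrite d0
    // (and with it dreg2) before it is copied out. Move dreg2 into d1 first.
    Move(d1, dreg2);
    Move(d0, dreg1);
    assert(d0.value == 2.0 && d1.value == 1.0);   // both arguments survive intact
    return 0;
  }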
3359 | 3362 |
3360 | 3363 |
3361 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, | 3364 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, |
3362 Register reg) { | 3365 Register reg) { |
| 3366 ASSERT(CpuFeatures::IsSupported(VFP2)); |
3363 if (use_eabi_hardfloat()) { | 3367 if (use_eabi_hardfloat()) { |
3364 Move(d0, dreg); | 3368 Move(d0, dreg); |
3365 Move(r0, reg); | 3369 Move(r0, reg); |
3366 } else { | 3370 } else { |
3367 Move(r2, reg); | 3371 Move(r2, reg); |
3368 vmov(r0, r1, dreg); | 3372 vmov(r0, r1, dreg); |
3369 } | 3373 } |
3370 } | 3374 } |
3371 | 3375 |
3372 | 3376 |
(...skipping 428 matching lines...)
3801 void CodePatcher::EmitCondition(Condition cond) { | 3805 void CodePatcher::EmitCondition(Condition cond) { |
3802 Instr instr = Assembler::instr_at(masm_.pc_); | 3806 Instr instr = Assembler::instr_at(masm_.pc_); |
3803 instr = (instr & ~kCondMask) | cond; | 3807 instr = (instr & ~kCondMask) | cond; |
3804 masm_.emit(instr); | 3808 masm_.emit(instr); |
3805 } | 3809 } |
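EmitCondition rewrites only the condition field of the instruction already at pc. A sketch of the bit surgery, assuming the ARM layout where the condition occupies bits 31:28 (kCondMask == 0xF0000000) and Condition values are pre-shifted into that field:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kCondMask = 0xF0000000u;
    const uint32_t al = 0xE0000000u;   // "always"
    const uint32_t eq = 0x00000000u;   // "equal"

    uint32_t instr = al | 0x01A00001u;             // some unconditional instruction
    uint32_t patched = (instr & ~kCondMask) | eq;  // same operation, now conditional on eq
    assert((patched & ~kCondMask) == (instr & ~kCondMask));  // body untouched
    assert((patched & kCondMask) == eq);                     // condition replaced
    return 0;
  }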
3806 | 3810 |
3807 | 3811 |
3808 } } // namespace v8::internal | 3812 } } // namespace v8::internal |
3809 | 3813 |
3810 #endif // V8_TARGET_ARCH_ARM | 3814 #endif // V8_TARGET_ARCH_ARM |