OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 273 matching lines...) |
284 | 284 |
285 | 285 |
286 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | 286 void MacroAssembler::Move(Register dst, Register src, Condition cond) { |
287 if (!dst.is(src)) { | 287 if (!dst.is(src)) { |
288 mov(dst, src, LeaveCC, cond); | 288 mov(dst, src, LeaveCC, cond); |
289 } | 289 } |
290 } | 290 } |
291 | 291 |
292 | 292 |
293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { | 293 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { |
294 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
295 CpuFeatureScope scope(this, VFP2); | |
296 if (!dst.is(src)) { | 294 if (!dst.is(src)) { |
297 vmov(dst, src); | 295 vmov(dst, src); |
298 } | 296 } |
299 } | 297 } |
300 | 298 |
301 | 299 |
302 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 300 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, |
303 Condition cond) { | 301 Condition cond) { |
304 if (!src2.is_reg() && | 302 if (!src2.is_reg() && |
305 !src2.must_output_reloc_info(this) && | 303 !src2.must_output_reloc_info(this) && |
(...skipping 498 matching lines...) |
804 const Register fpscr_flags, | 802 const Register fpscr_flags, |
805 const Condition cond) { | 803 const Condition cond) { |
806 // Compare and load FPSCR. | 804 // Compare and load FPSCR. |
807 vcmp(src1, src2, cond); | 805 vcmp(src1, src2, cond); |
808 vmrs(fpscr_flags, cond); | 806 vmrs(fpscr_flags, cond); |
809 } | 807 } |
810 | 808 |
811 void MacroAssembler::Vmov(const DwVfpRegister dst, | 809 void MacroAssembler::Vmov(const DwVfpRegister dst, |
812 const double imm, | 810 const double imm, |
813 const Register scratch) { | 811 const Register scratch) { |
814 ASSERT(IsEnabled(VFP2)); | |
815 static const DoubleRepresentation minus_zero(-0.0); | 812 static const DoubleRepresentation minus_zero(-0.0); |
816 static const DoubleRepresentation zero(0.0); | 813 static const DoubleRepresentation zero(0.0); |
817 DoubleRepresentation value(imm); | 814 DoubleRepresentation value(imm); |
818 // Handle special values first. | 815 // Handle special values first. |
819 if (value.bits == zero.bits) { | 816 if (value.bits == zero.bits) { |
820 vmov(dst, kDoubleRegZero); | 817 vmov(dst, kDoubleRegZero); |
821 } else if (value.bits == minus_zero.bits) { | 818 } else if (value.bits == minus_zero.bits) { |
822 vneg(dst, kDoubleRegZero); | 819 vneg(dst, kDoubleRegZero); |
823 } else { | 820 } else { |
824 vmov(dst, imm, scratch); | 821 vmov(dst, imm, scratch); |
(...skipping 41 matching lines...) |
866 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 863 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
867 | 864 |
868 // Save the frame pointer and the context in top. | 865 // Save the frame pointer and the context in top. |
869 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 866 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
870 str(fp, MemOperand(ip)); | 867 str(fp, MemOperand(ip)); |
871 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 868 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
872 str(cp, MemOperand(ip)); | 869 str(cp, MemOperand(ip)); |
873 | 870 |
874 // Optionally save all double registers. | 871 // Optionally save all double registers. |
875 if (save_doubles) { | 872 if (save_doubles) { |
876 CpuFeatureScope scope(this, VFP2); | |
877 // Check CPU flags for number of registers, setting the Z condition flag. | 873 // Check CPU flags for number of registers, setting the Z condition flag. |
878 CheckFor32DRegs(ip); | 874 CheckFor32DRegs(ip); |
879 | 875 |
880 // Push registers d0-d15, and possibly d16-d31, on the stack. | 876 // Push registers d0-d15, and possibly d16-d31, on the stack. |
881 // If d16-d31 are not pushed, decrease the stack pointer instead. | 877 // If d16-d31 are not pushed, decrease the stack pointer instead. |
882 vstm(db_w, sp, d16, d31, ne); | 878 vstm(db_w, sp, d16, d31, ne); |
883 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); | 879 sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); |
884 vstm(db_w, sp, d0, d15); | 880 vstm(db_w, sp, d0, d15); |
885 // Note that d0 will be accessible at | 881 // Note that d0 will be accessible at |
886 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, | 882 // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, |
(...skipping 44 matching lines...) |
931 // flag. | 927 // flag. |
932 return FLAG_sim_stack_alignment; | 928 return FLAG_sim_stack_alignment; |
933 #endif // defined(V8_HOST_ARCH_ARM) | 929 #endif // defined(V8_HOST_ARCH_ARM) |
934 } | 930 } |
935 | 931 |
936 | 932 |
937 void MacroAssembler::LeaveExitFrame(bool save_doubles, | 933 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
938 Register argument_count) { | 934 Register argument_count) { |
939 // Optionally restore all double registers. | 935 // Optionally restore all double registers. |
940 if (save_doubles) { | 936 if (save_doubles) { |
941 CpuFeatureScope scope(this, VFP2); | |
942 // Calculate the stack location of the saved doubles and restore them. | 937 // Calculate the stack location of the saved doubles and restore them. |
943 const int offset = 2 * kPointerSize; | 938 const int offset = 2 * kPointerSize; |
944 sub(r3, fp, | 939 sub(r3, fp, |
945 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); | 940 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); |
946 | 941 |
947 // Check CPU flags for number of registers, setting the Z condition flag. | 942 // Check CPU flags for number of registers, setting the Z condition flag. |
948 CheckFor32DRegs(ip); | 943 CheckFor32DRegs(ip); |
949 | 944 |
950 // Pop registers d0-d15, and possibly d16-d31, from r3. | 945 // Pop registers d0-d15, and possibly d16-d31, from r3. |
951 // If d16-d31 are not popped, increase r3 instead. | 946 // If d16-d31 are not popped, increase r3 instead. |
(...skipping 16 matching lines...) |
968 | 963 |
969 // Tear down the exit frame, pop the arguments, and return. | 964 // Tear down the exit frame, pop the arguments, and return. |
970 mov(sp, Operand(fp)); | 965 mov(sp, Operand(fp)); |
971 ldm(ia_w, sp, fp.bit() | lr.bit()); | 966 ldm(ia_w, sp, fp.bit() | lr.bit()); |
972 if (argument_count.is_valid()) { | 967 if (argument_count.is_valid()) { |
973 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); | 968 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); |
974 } | 969 } |
975 } | 970 } |
976 | 971 |
977 void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { | 972 void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { |
978 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
979 if (use_eabi_hardfloat()) { | 973 if (use_eabi_hardfloat()) { |
980 Move(dst, d0); | 974 Move(dst, d0); |
981 } else { | 975 } else { |
982 vmov(dst, r0, r1); | 976 vmov(dst, r0, r1); |
983 } | 977 } |
984 } | 978 } |
985 | 979 |
986 | 980 |
987 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { | 981 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { |
988 // This macro takes the dst register to make the code more readable | 982 // This macro takes the dst register to make the code more readable |
(...skipping 1050 matching lines...) |
2039 | 2033 |
2040 bind(&smi_value); | 2034 bind(&smi_value); |
2041 add(scratch1, elements_reg, | 2035 add(scratch1, elements_reg, |
2042 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - | 2036 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - |
2043 elements_offset)); | 2037 elements_offset)); |
2044 add(scratch1, scratch1, | 2038 add(scratch1, scratch1, |
2045 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | 2039 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
2046 // scratch1 is now effective address of the double element | 2040 // scratch1 is now effective address of the double element |
2047 | 2041 |
2048 FloatingPointHelper::Destination destination; | 2042 FloatingPointHelper::Destination destination; |
2049 if (CpuFeatures::IsSupported(VFP2)) { | 2043 destination = FloatingPointHelper::kVFPRegisters; |
2050 destination = FloatingPointHelper::kVFPRegisters; | |
2051 } else { | |
2052 destination = FloatingPointHelper::kCoreRegisters; | |
2053 } | |
2054 | 2044 |
2055 Register untagged_value = elements_reg; | 2045 Register untagged_value = elements_reg; |
2056 SmiUntag(untagged_value, value_reg); | 2046 SmiUntag(untagged_value, value_reg); |
2057 FloatingPointHelper::ConvertIntToDouble(this, | 2047 FloatingPointHelper::ConvertIntToDouble(this, |
2058 untagged_value, | 2048 untagged_value, |
2059 destination, | 2049 destination, |
2060 d0, | 2050 d0, |
2061 mantissa_reg, | 2051 mantissa_reg, |
2062 exponent_reg, | 2052 exponent_reg, |
2063 scratch4, | 2053 scratch4, |
2064 s2); | 2054 s2); |
2065 if (destination == FloatingPointHelper::kVFPRegisters) { | 2055 if (destination == FloatingPointHelper::kVFPRegisters) { |
2066 CpuFeatureScope scope(this, VFP2); | |
2067 vstr(d0, scratch1, 0); | 2056 vstr(d0, scratch1, 0); |
2068 } else { | 2057 } else { |
2069 str(mantissa_reg, MemOperand(scratch1, 0)); | 2058 str(mantissa_reg, MemOperand(scratch1, 0)); |
2070 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); | 2059 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); |
2071 } | 2060 } |
2072 bind(&done); | 2061 bind(&done); |
2073 } | 2062 } |
2074 | 2063 |
2075 | 2064 |
2076 void MacroAssembler::CompareMap(Register obj, | 2065 void MacroAssembler::CompareMap(Register obj, |
(...skipping 339 matching lines...) |
2416 SwVfpRegister scratch2) { | 2405 SwVfpRegister scratch2) { |
2417 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); | 2406 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); |
2418 vmov(scratch2, scratch1); | 2407 vmov(scratch2, scratch1); |
2419 vcvt_f64_s32(value, scratch2); | 2408 vcvt_f64_s32(value, scratch2); |
2420 } | 2409 } |
2421 | 2410 |
2422 | 2411 |
2423 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2412 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, |
2424 DwVfpRegister double_scratch) { | 2413 DwVfpRegister double_scratch) { |
2425 ASSERT(!double_input.is(double_scratch)); | 2414 ASSERT(!double_input.is(double_scratch)); |
2426 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
2427 CpuFeatureScope scope(this, VFP2); | |
2428 | |
2429 vcvt_s32_f64(double_scratch.low(), double_input); | 2415 vcvt_s32_f64(double_scratch.low(), double_input); |
2430 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2416 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2431 VFPCompareAndSetFlags(double_input, double_scratch); | 2417 VFPCompareAndSetFlags(double_input, double_scratch); |
2432 } | 2418 } |
2433 | 2419 |
2434 | 2420 |
2435 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2421 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2436 DwVfpRegister double_input, | 2422 DwVfpRegister double_input, |
2437 DwVfpRegister double_scratch) { | 2423 DwVfpRegister double_scratch) { |
2438 ASSERT(!double_input.is(double_scratch)); | 2424 ASSERT(!double_input.is(double_scratch)); |
2439 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
2440 CpuFeatureScope scope(this, VFP2); | |
2441 | |
2442 vcvt_s32_f64(double_scratch.low(), double_input); | 2425 vcvt_s32_f64(double_scratch.low(), double_input); |
2443 vmov(result, double_scratch.low()); | 2426 vmov(result, double_scratch.low()); |
2444 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2427 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2445 VFPCompareAndSetFlags(double_input, double_scratch); | 2428 VFPCompareAndSetFlags(double_input, double_scratch); |
2446 } | 2429 } |
2447 | 2430 |
2448 | 2431 |
2449 void MacroAssembler::TryInt32Floor(Register result, | 2432 void MacroAssembler::TryInt32Floor(Register result, |
2450 DwVfpRegister double_input, | 2433 DwVfpRegister double_input, |
2451 Register input_high, | 2434 Register input_high, |
2452 DwVfpRegister double_scratch, | 2435 DwVfpRegister double_scratch, |
2453 Label* done, | 2436 Label* done, |
2454 Label* exact) { | 2437 Label* exact) { |
2455 ASSERT(!result.is(input_high)); | 2438 ASSERT(!result.is(input_high)); |
2456 ASSERT(!double_input.is(double_scratch)); | 2439 ASSERT(!double_input.is(double_scratch)); |
2457 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
2458 CpuFeatureScope scope(this, VFP2); | |
2459 Label negative, exception; | 2440 Label negative, exception; |
2460 | 2441 |
2461 // Test for NaN and infinities. | 2442 // Test for NaN and infinities. |
2462 Sbfx(result, input_high, | 2443 Sbfx(result, input_high, |
2463 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2444 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
2464 cmp(result, Operand(-1)); | 2445 cmp(result, Operand(-1)); |
2465 b(eq, &exception); | 2446 b(eq, &exception); |
2466 // Test for values that can be exactly represented as a | 2447 // Test for values that can be exactly represented as a |
2467 // signed 32-bit integer. | 2448 // signed 32-bit integer. |
2468 TryDoubleToInt32Exact(result, double_input, double_scratch); | 2449 TryDoubleToInt32Exact(result, double_input, double_scratch); |
(...skipping 24 matching lines...) |
2493 } | 2474 } |
2494 | 2475 |
2495 | 2476 |
2496 void MacroAssembler::ECMAConvertNumberToInt32(Register source, | 2477 void MacroAssembler::ECMAConvertNumberToInt32(Register source, |
2497 Register result, | 2478 Register result, |
2498 Register input_low, | 2479 Register input_low, |
2499 Register input_high, | 2480 Register input_high, |
2500 Register scratch, | 2481 Register scratch, |
2501 DwVfpRegister double_scratch1, | 2482 DwVfpRegister double_scratch1, |
2502 DwVfpRegister double_scratch2) { | 2483 DwVfpRegister double_scratch2) { |
2503 if (CpuFeatures::IsSupported(VFP2)) { | 2484 vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); |
2504 CpuFeatureScope scope(this, VFP2); | 2485 ECMAToInt32(result, double_scratch1, double_scratch2, |
2505 vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); | 2486 scratch, input_high, input_low); |
2506 ECMAToInt32VFP(result, double_scratch1, double_scratch2, | |
2507 scratch, input_high, input_low); | |
2508 } else { | |
2509 Ldrd(input_low, input_high, | |
2510 FieldMemOperand(source, HeapNumber::kValueOffset)); | |
2511 ECMAToInt32NoVFP(result, scratch, input_high, input_low); | |
2512 } | |
2513 } | 2487 } |
2514 | 2488 |
2515 | 2489 |
2516 void MacroAssembler::ECMAToInt32VFP(Register result, | 2490 void MacroAssembler::ECMAToInt32(Register result, |
2517 DwVfpRegister double_input, | 2491 DwVfpRegister double_input, |
2518 DwVfpRegister double_scratch, | 2492 DwVfpRegister double_scratch, |
2519 Register scratch, | 2493 Register scratch, |
2520 Register input_high, | 2494 Register input_high, |
2521 Register input_low) { | 2495 Register input_low) { |
2522 CpuFeatureScope scope(this, VFP2); | |
2523 ASSERT(!input_high.is(result)); | 2496 ASSERT(!input_high.is(result)); |
2524 ASSERT(!input_low.is(result)); | 2497 ASSERT(!input_low.is(result)); |
2525 ASSERT(!input_low.is(input_high)); | 2498 ASSERT(!input_low.is(input_high)); |
2526 ASSERT(!scratch.is(result) && | 2499 ASSERT(!scratch.is(result) && |
2527 !scratch.is(input_high) && | 2500 !scratch.is(input_high) && |
2528 !scratch.is(input_low)); | 2501 !scratch.is(input_low)); |
2529 ASSERT(!double_input.is(double_scratch)); | 2502 ASSERT(!double_input.is(double_scratch)); |
2530 | 2503 |
2531 Label out_of_range, negate, done; | 2504 Label out_of_range, negate, done; |
2532 | 2505 |
(...skipping 19 matching lines...) |
2552 b(ge, &out_of_range); | 2525 b(ge, &out_of_range); |
2553 | 2526 |
2554 // If we reach this code, 31 <= exponent <= 83. | 2527 // If we reach this code, 31 <= exponent <= 83. |
2555 // So, we don't have to handle cases where 0 <= exponent <= 20 for | 2528 // So, we don't have to handle cases where 0 <= exponent <= 20 for |
2556 // which we would need to shift right the high part of the mantissa. | 2529 // which we would need to shift right the high part of the mantissa. |
2557 ECMAToInt32Tail(result, scratch, input_high, input_low, | 2530 ECMAToInt32Tail(result, scratch, input_high, input_low, |
2558 &out_of_range, &negate, &done); | 2531 &out_of_range, &negate, &done); |
2559 } | 2532 } |
2560 | 2533 |
2561 | 2534 |
2562 void MacroAssembler::ECMAToInt32NoVFP(Register result, | |
2563 Register scratch, | |
2564 Register input_high, | |
2565 Register input_low) { | |
2566 ASSERT(!result.is(scratch)); | |
2567 ASSERT(!result.is(input_high)); | |
2568 ASSERT(!result.is(input_low)); | |
2569 ASSERT(!scratch.is(input_high)); | |
2570 ASSERT(!scratch.is(input_low)); | |
2571 ASSERT(!input_high.is(input_low)); | |
2572 | |
2573 Label both, out_of_range, negate, done; | |
2574 | |
2575 Ubfx(scratch, input_high, | |
2576 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
2577 // Load scratch with exponent. | |
2578 sub(scratch, scratch, Operand(HeapNumber::kExponentBias)); | |
2579 // If exponent is negative, 0 < input < 1, the result is 0. | |
2580 // If exponent is greater than or equal to 84, the 32 least significant | |
2581 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), | |
2582 // the result is 0. | |
2583 // This test also catches NaN and infinities, which also return 0. | |
2584 cmp(scratch, Operand(84)); | |
2585 // We do an unsigned comparison so negative numbers are treated as big | |
2586 // positive numbers and the two tests above are done in one test. | |
2587 b(hs, &out_of_range); | |
2588 | |
2589 // Load scratch with 20 - exponent. | |
2590 rsb(scratch, scratch, Operand(20), SetCC); | |
2591 b(mi, &both); | |
2592 | |
2593 // Test 0 and -0. | |
2594 bic(result, input_high, Operand(HeapNumber::kSignMask)); | |
2595 orr(result, result, Operand(input_low), SetCC); | |
2596 b(eq, &done); | |
2597 // 0 <= exponent <= 20, shift only input_high. | |
2598 // Scratch contains: 20 - exponent. | |
2599 Ubfx(result, input_high, | |
2600 0, HeapNumber::kMantissaBitsInTopWord); | |
2601 // Set the implicit 1 before the mantissa part in input_high. | |
2602 orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); | |
2603 mov(result, Operand(result, LSR, scratch)); | |
2604 b(&negate); | |
2605 | |
2606 bind(&both); | |
2607 // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP. | |
2608 rsb(scratch, scratch, Operand(19)); | |
2609 ECMAToInt32Tail(result, scratch, input_high, input_low, | |
2610 &out_of_range, &negate, &done); | |
2611 } | |
2612 | |
2613 | |
2614 void MacroAssembler::ECMAToInt32Tail(Register result, | 2535 void MacroAssembler::ECMAToInt32Tail(Register result, |
2615 Register scratch, | 2536 Register scratch, |
2616 Register input_high, | 2537 Register input_high, |
2617 Register input_low, | 2538 Register input_low, |
2618 Label* out_of_range, | 2539 Label* out_of_range, |
2619 Label* negate, | 2540 Label* negate, |
2620 Label* done) { | 2541 Label* done) { |
2621 Label only_low; | 2542 Label only_low; |
2622 | 2543 |
2623 // On entry, scratch contains exponent - 1. | 2544 // On entry, scratch contains exponent - 1. |
(...skipping 82 matching lines...) |
2706 | 2627 |
2707 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { | 2628 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
2708 CallRuntime(Runtime::FunctionForId(fid), num_arguments); | 2629 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
2709 } | 2630 } |
2710 | 2631 |
2711 | 2632 |
2712 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { | 2633 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { |
2713 const Runtime::Function* function = Runtime::FunctionForId(id); | 2634 const Runtime::Function* function = Runtime::FunctionForId(id); |
2714 mov(r0, Operand(function->nargs)); | 2635 mov(r0, Operand(function->nargs)); |
2715 mov(r1, Operand(ExternalReference(function, isolate()))); | 2636 mov(r1, Operand(ExternalReference(function, isolate()))); |
2716 SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) | 2637 CEntryStub stub(1, kSaveFPRegs); |
2717 ? kSaveFPRegs | |
2718 : kDontSaveFPRegs; | |
2719 CEntryStub stub(1, mode); | |
2720 CallStub(&stub); | 2638 CallStub(&stub); |
2721 } | 2639 } |
2722 | 2640 |
2723 | 2641 |
2724 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 2642 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
2725 int num_arguments) { | 2643 int num_arguments) { |
2726 mov(r0, Operand(num_arguments)); | 2644 mov(r0, Operand(num_arguments)); |
2727 mov(r1, Operand(ext)); | 2645 mov(r1, Operand(ext)); |
2728 | 2646 |
2729 CEntryStub stub(1); | 2647 CEntryStub stub(1); |
(...skipping 724 matching lines...) |
3454 } | 3372 } |
3455 | 3373 |
3456 | 3374 |
3457 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3375 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3458 Register scratch) { | 3376 Register scratch) { |
3459 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3377 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3460 } | 3378 } |
3461 | 3379 |
3462 | 3380 |
3463 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { | 3381 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { |
3464 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
3465 if (use_eabi_hardfloat()) { | 3382 if (use_eabi_hardfloat()) { |
3466 Move(d0, dreg); | 3383 Move(d0, dreg); |
3467 } else { | 3384 } else { |
3468 vmov(r0, r1, dreg); | 3385 vmov(r0, r1, dreg); |
3469 } | 3386 } |
3470 } | 3387 } |
3471 | 3388 |
3472 | 3389 |
3473 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, | 3390 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, |
3474 DwVfpRegister dreg2) { | 3391 DwVfpRegister dreg2) { |
3475 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
3476 if (use_eabi_hardfloat()) { | 3392 if (use_eabi_hardfloat()) { |
3477 if (dreg2.is(d0)) { | 3393 if (dreg2.is(d0)) { |
3478 ASSERT(!dreg1.is(d1)); | 3394 ASSERT(!dreg1.is(d1)); |
3479 Move(d1, dreg2); | 3395 Move(d1, dreg2); |
3480 Move(d0, dreg1); | 3396 Move(d0, dreg1); |
3481 } else { | 3397 } else { |
3482 Move(d0, dreg1); | 3398 Move(d0, dreg1); |
3483 Move(d1, dreg2); | 3399 Move(d1, dreg2); |
3484 } | 3400 } |
3485 } else { | 3401 } else { |
3486 vmov(r0, r1, dreg1); | 3402 vmov(r0, r1, dreg1); |
3487 vmov(r2, r3, dreg2); | 3403 vmov(r2, r3, dreg2); |
3488 } | 3404 } |
3489 } | 3405 } |
3490 | 3406 |
3491 | 3407 |
3492 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, | 3408 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, |
3493 Register reg) { | 3409 Register reg) { |
3494 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
3495 if (use_eabi_hardfloat()) { | 3410 if (use_eabi_hardfloat()) { |
3496 Move(d0, dreg); | 3411 Move(d0, dreg); |
3497 Move(r0, reg); | 3412 Move(r0, reg); |
3498 } else { | 3413 } else { |
3499 Move(r2, reg); | 3414 Move(r2, reg); |
3500 vmov(r0, r1, dreg); | 3415 vmov(r0, r1, dreg); |
3501 } | 3416 } |
3502 } | 3417 } |
3503 | 3418 |
3504 | 3419 |
(...skipping 442 matching lines...) |
3947 void CodePatcher::EmitCondition(Condition cond) { | 3862 void CodePatcher::EmitCondition(Condition cond) { |
3948 Instr instr = Assembler::instr_at(masm_.pc_); | 3863 Instr instr = Assembler::instr_at(masm_.pc_); |
3949 instr = (instr & ~kCondMask) | cond; | 3864 instr = (instr & ~kCondMask) | cond; |
3950 masm_.emit(instr); | 3865 masm_.emit(instr); |
3951 } | 3866 } |
3952 | 3867 |
3953 | 3868 |
3954 } } // namespace v8::internal | 3869 } } // namespace v8::internal |
3955 | 3870 |
3956 #endif // V8_TARGET_ARCH_ARM | 3871 #endif // V8_TARGET_ARCH_ARM |