OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 774 matching lines...)
785 if (value.bits == zero.bits) { | 785 if (value.bits == zero.bits) { |
786 vmov(dst, kDoubleRegZero); | 786 vmov(dst, kDoubleRegZero); |
787 } else if (value.bits == minus_zero.bits) { | 787 } else if (value.bits == minus_zero.bits) { |
788 vneg(dst, kDoubleRegZero); | 788 vneg(dst, kDoubleRegZero); |
789 } else { | 789 } else { |
790 vmov(dst, imm, scratch); | 790 vmov(dst, imm, scratch); |
791 } | 791 } |
792 } | 792 } |
793 | 793 |
794 | 794 |
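Reviewer note: the fragment above special-cases the immediates +0.0 and -0.0 by bit pattern rather than by floating-point equality, since 0.0 == -0.0 compares true as doubles; +0.0 is served straight from kDoubleRegZero and -0.0 by negating it with vneg. A minimal sketch of the bit-level distinction (plain C++, not V8 code):

```cpp
// Why the test is on value.bits, not the double itself: +0.0 and -0.0
// are equal under IEEE-754 comparison but differ in their bit patterns,
// and -0.0 is exactly +0.0 with the sign bit set (hence the vneg above).
#include <cstdint>
#include <cstdio>
#include <cstring>

uint64_t BitsOf(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

int main() {
  std::printf("0.0 == -0.0 : %d\n", 0.0 == -0.0);                  // 1
  std::printf("same bits   : %d\n", BitsOf(0.0) == BitsOf(-0.0));  // 0
  std::printf("-0.0 bits   : 0x%016llX\n",
              static_cast<unsigned long long>(BitsOf(-0.0)));
}
```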
| 795 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { |
| 796 if (src.code() < 16) { |
| 797 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); |
| 798 vmov(dst, loc.high()); |
| 799 } else { |
| 800 vmov(dst, VmovIndexHi, src); |
| 801 } |
| 802 } |
| 803 |
| 804 |
| 805 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { |
| 806 if (dst.code() < 16) { |
| 807 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); |
| 808 vmov(loc.high(), src); |
| 809 } else { |
| 810 vmov(dst, VmovIndexHi, src); |
| 811 } |
| 812 } |
| 813 |
| 814 |
| 815 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { |
| 816 if (src.code() < 16) { |
| 817 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); |
| 818 vmov(dst, loc.low()); |
| 819 } else { |
| 820 vmov(dst, VmovIndexLo, src); |
| 821 } |
| 822 } |
| 823 |
| 824 |
| 825 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { |
| 826 if (dst.code() < 16) { |
| 827 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); |
| 828 vmov(loc.low(), src); |
| 829 } else { |
| 830 vmov(dst, VmovIndexLo, src); |
| 831 } |
| 832 } |
| 833 |
| 834 |
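Reviewer note: the new VmovHigh/VmovLow helpers branch on code() < 16 because only d0-d15 alias the single-precision bank (d&lt;n&gt;.low == s&lt;2n&gt;, d&lt;n&gt;.high == s&lt;2n+1&gt;), so their 32-bit halves can be moved as ordinary S registers; d16-d31 (VFPv3-D32) have no S aliases and need the element-indexed vmov form (VmovIndexLo/VmovIndexHi). A rough model of the aliasing arithmetic (plain C++, illustrative only):

```cpp
// Models the S/D register aliasing that motivates the code() < 16 test:
// d0..d15 overlap the S bank pairwise; d16..d31 do not.
#include <cassert>
#include <cstdio>

struct DRegister {
  int code;  // 0..31
  bool has_s_aliases() const { return code < 16; }
  int low_s() const  { assert(has_s_aliases()); return 2 * code; }
  int high_s() const { assert(has_s_aliases()); return 2 * code + 1; }
};

int main() {
  DRegister d7{7}, d20{20};
  std::printf("d7 halves  : s%d (low), s%d (high)\n", d7.low_s(), d7.high_s());
  std::printf("d20 aliased: %s -> needs vmov Dd[i], Rt\n",
              d20.has_s_aliases() ? "yes" : "no");
}
```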
795 void MacroAssembler::ConvertNumberToInt32(Register object, | 835 void MacroAssembler::ConvertNumberToInt32(Register object, |
796 Register dst, | 836 Register dst, |
797 Register heap_number_map, | 837 Register heap_number_map, |
798 Register scratch1, | 838 Register scratch1, |
799 Register scratch2, | 839 Register scratch2, |
800 Register scratch3, | 840 Register scratch3, |
801 DwVfpRegister double_scratch1, | 841 DwVfpRegister double_scratch1, |
802 DwVfpRegister double_scratch2, | 842 LowDwVfpRegister double_scratch2, |
803 Label* not_number) { | 843 Label* not_number) { |
804 Label done; | 844 Label done; |
805 UntagAndJumpIfSmi(dst, object, &done); | 845 UntagAndJumpIfSmi(dst, object, &done); |
806 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 846 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
807 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); | 847 vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); |
808 ECMAToInt32(dst, double_scratch1, | 848 ECMAToInt32(dst, double_scratch1, |
809 scratch1, scratch2, scratch3, double_scratch2); | 849 scratch1, scratch2, scratch3, double_scratch2); |
810 | 850 |
811 bind(&done); | 851 bind(&done); |
812 } | 852 } |
813 | 853 |
814 | 854 |
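Reviewer note: ConvertNumberToInt32 untags a smi in place when it can; otherwise it loads the HeapNumber payload and defers to ECMAToInt32, which implements ECMA-262 ToInt32 -- truncate toward zero, wrap modulo 2^32, reinterpret as signed. A portable sketch of those semantics (plain C++, nothing V8-specific assumed):

```cpp
// ECMA-262 ToInt32: NaN/Inf map to 0; otherwise truncate, reduce
// modulo 2^32, and reinterpret the low 32 bits as a signed integer.
#include <cmath>
#include <cstdint>
#include <cstdio>

int32_t EcmaToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double t = std::trunc(d);                // Drop the fractional part.
  double m = std::fmod(t, 4294967296.0);   // Wrap modulo 2^32.
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  std::printf("%d\n", EcmaToInt32(4294967301.0));   // 5  (2^32 + 5)
  std::printf("%d\n", EcmaToInt32(-1.5));           // -1
  std::printf("%d\n", EcmaToInt32(2147483648.0));   // -2147483648
}
```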
815 void MacroAssembler::LoadNumber(Register object, | 855 void MacroAssembler::LoadNumber(Register object, |
816 DwVfpRegister dst, | 856 LowDwVfpRegister dst, |
817 Register heap_number_map, | 857 Register heap_number_map, |
818 Register scratch, | 858 Register scratch, |
819 Label* not_number) { | 859 Label* not_number) { |
820 Label is_smi, done; | 860 Label is_smi, done; |
821 | 861 |
822 UntagAndJumpIfSmi(scratch, object, &is_smi); | 862 UntagAndJumpIfSmi(scratch, object, &is_smi); |
823 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); | 863 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); |
824 | 864 |
825 vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 865 vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
826 b(&done); | 866 b(&done); |
827 | 867 |
828 // Handle loading a double from a smi. | 868 // Handle loading a double from a smi. |
829 bind(&is_smi); | 869 bind(&is_smi); |
830 vmov(dst.high(), scratch); | 870 vmov(dst.high(), scratch); |
831 vcvt_f64_s32(dst, dst.high()); | 871 vcvt_f64_s32(dst, dst.high()); |
832 | 872 |
833 bind(&done); | 873 bind(&done); |
834 } | 874 } |
835 | 875 |
836 | 876 |
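Reviewer note: in the smi path of LoadNumber, dst.high() doubles as the integer staging slot for vcvt_f64_s32, which only works when dst has addressable S-register halves -- the reason the parameter is narrowed to LowDwVfpRegister in this change. The equivalent computation in plain C++ (illustrative; the one-bit smi tag matches V8 on 32-bit ARM):

```cpp
// The smi fast path: untag (UntagAndJumpIfSmi has already done this in
// the assembly), stage in an integer slot, convert to double -- the
// vmov(dst.high(), scratch) + vcvt_f64_s32(dst, dst.high()) pair above.
#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;  // Assumption: V8's 32-bit smi tag.

double LoadSmiAsDouble(int32_t tagged) {
  int32_t untagged = tagged >> kSmiTagSize;   // UntagAndJumpIfSmi
  return static_cast<double>(untagged);       // vcvt_f64_s32
}

int main() {
  int32_t smi = 42 << kSmiTagSize;            // Tagged representation.
  std::printf("%f\n", LoadSmiAsDouble(smi));  // 42.000000
}
```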
837 void MacroAssembler::LoadNumberAsInt32Double(Register object, | 877 void MacroAssembler::LoadNumberAsInt32Double(Register object, |
838 DwVfpRegister double_dst, | 878 DwVfpRegister double_dst, |
839 Register heap_number_map, | 879 Register heap_number_map, |
840 Register scratch, | 880 Register scratch, |
841 DwVfpRegister double_scratch, | 881 LowDwVfpRegister double_scratch, |
842 Label* not_int32) { | 882 Label* not_int32) { |
843 ASSERT(!scratch.is(object)); | 883 ASSERT(!scratch.is(object)); |
844 ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch)); | 884 ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch)); |
845 | 885 |
846 Label done, obj_is_not_smi; | 886 Label done, obj_is_not_smi; |
847 | 887 |
848 UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi); | 888 UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi); |
849 vmov(double_scratch.low(), scratch); | 889 vmov(double_scratch.low(), scratch); |
850 vcvt_f64_s32(double_dst, double_scratch.low()); | 890 vcvt_f64_s32(double_dst, double_scratch.low()); |
851 b(&done); | 891 b(&done); |
(...skipping 11 matching lines...)
863 | 903 |
864 bind(&done); | 904 bind(&done); |
865 } | 905 } |
866 | 906 |
867 | 907 |
868 void MacroAssembler::LoadNumberAsInt32(Register object, | 908 void MacroAssembler::LoadNumberAsInt32(Register object, |
869 Register dst, | 909 Register dst, |
870 Register heap_number_map, | 910 Register heap_number_map, |
871 Register scratch, | 911 Register scratch, |
872 DwVfpRegister double_scratch0, | 912 DwVfpRegister double_scratch0, |
873 DwVfpRegister double_scratch1, | 913 LowDwVfpRegister double_scratch1, |
874 Label* not_int32) { | 914 Label* not_int32) { |
875 ASSERT(!dst.is(object)); | 915 ASSERT(!dst.is(object)); |
876 ASSERT(!scratch.is(object)); | 916 ASSERT(!scratch.is(object)); |
877 | 917 |
878 Label done, maybe_undefined; | 918 Label done, maybe_undefined; |
879 | 919 |
880 UntagAndJumpIfSmi(dst, object, &done); | 920 UntagAndJumpIfSmi(dst, object, &done); |
881 | 921 |
882 JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined); | 922 JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined); |
883 | 923 |
(...skipping 1175 matching lines...)
2059 Register scratch, | 2099 Register scratch, |
2060 Label* fail) { | 2100 Label* fail) { |
2061 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2101 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2062 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2102 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2063 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2103 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2064 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 2104 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
2065 b(hi, fail); | 2105 b(hi, fail); |
2066 } | 2106 } |
2067 | 2107 |
2068 | 2108 |
2069 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 2109 void MacroAssembler::StoreNumberToDoubleElements( |
2070 Register key_reg, | 2110 Register value_reg, |
2071 Register elements_reg, | 2111 Register key_reg, |
2072 Register scratch1, | 2112 Register elements_reg, |
2073 Label* fail, | 2113 Register scratch1, |
2074 int elements_offset) { | 2114 LowDwVfpRegister double_scratch0, |
| 2115 Label* fail, |
| 2116 int elements_offset) { |
2075 Label smi_value, store; | 2117 Label smi_value, store; |
2076 | 2118 |
2077 // Handle smi values specially. | 2119 // Handle smi values specially. |
2078 JumpIfSmi(value_reg, &smi_value); | 2120 JumpIfSmi(value_reg, &smi_value); |
2079 | 2121 |
2080 // Ensure that the object is a heap number | 2122 // Ensure that the object is a heap number |
2081 CheckMap(value_reg, | 2123 CheckMap(value_reg, |
2082 scratch1, | 2124 scratch1, |
2083 isolate()->factory()->heap_number_map(), | 2125 isolate()->factory()->heap_number_map(), |
2084 fail, | 2126 fail, |
2085 DONT_DO_SMI_CHECK); | 2127 DONT_DO_SMI_CHECK); |
2086 | 2128 |
2087 vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 2129 vldr(double_scratch0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
2088 // Force a canonical NaN. | 2130 // Force a canonical NaN. |
2089 if (emit_debug_code()) { | 2131 if (emit_debug_code()) { |
2090 vmrs(ip); | 2132 vmrs(ip); |
2091 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | 2133 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); |
2092 Assert(ne, "Default NaN mode not set"); | 2134 Assert(ne, "Default NaN mode not set"); |
2093 } | 2135 } |
2094 VFPCanonicalizeNaN(d0); | 2136 VFPCanonicalizeNaN(double_scratch0); |
2095 b(&store); | 2137 b(&store); |
2096 | 2138 |
2097 bind(&smi_value); | 2139 bind(&smi_value); |
2098 SmiToDouble(d0, value_reg); | 2140 SmiToDouble(double_scratch0, value_reg); |
2099 | 2141 |
2100 bind(&store); | 2142 bind(&store); |
2101 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); | 2143 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); |
2102 vstr(d0, FieldMemOperand(scratch1, | 2144 vstr(double_scratch0, |
2103 FixedDoubleArray::kHeaderSize - elements_offset)); | 2145 FieldMemOperand(scratch1, |
| 2146 FixedDoubleArray::kHeaderSize - elements_offset)); |
2104 } | 2147 } |
2105 | 2148 |
2106 | 2149 |
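Reviewer note: the canonicalization above is load-bearing -- FixedDoubleArray reserves one particular NaN bit pattern to represent the hole, so an arbitrary NaN payload must be collapsed to the canonical quiet NaN before being stored (the emit_debug_code() block only asserts that the FPSCR default-NaN mode is already doing this). A bit-level sketch (plain C++, not V8 code; the hole pattern itself is not reproduced here):

```cpp
// Collapse any NaN to the canonical quiet NaN so its payload cannot
// collide with a reserved sentinel bit pattern such as "the hole".
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

double CanonicalizeNaN(double d) {
  return std::isnan(d) ? std::nan("") : d;  // Canonical quiet NaN.
}

int main() {
  uint64_t odd_payload = 0x7FF0000000000001ULL;  // A non-canonical NaN.
  double weird;
  std::memcpy(&weird, &odd_payload, sizeof weird);

  double stored = CanonicalizeNaN(weird);
  uint64_t out;
  std::memcpy(&out, &stored, sizeof out);
  std::printf("canonicalized bits: 0x%016llX\n",
              static_cast<unsigned long long>(out));
}
```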
2107 void MacroAssembler::CompareMap(Register obj, | 2150 void MacroAssembler::CompareMap(Register obj, |
2108 Register scratch, | 2151 Register scratch, |
2109 Handle<Map> map, | 2152 Handle<Map> map, |
2110 Label* early_success) { | 2153 Label* early_success) { |
2111 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2154 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2112 CompareMap(scratch, map, early_success); | 2155 CompareMap(scratch, map, early_success); |
2113 } | 2156 } |
(...skipping 285 matching lines...)
2399 // conflict. | 2442 // conflict. |
2400 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 2443 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
2401 (1 << String::kArrayIndexValueBits)); | 2444 (1 << String::kArrayIndexValueBits)); |
2402 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in | 2445 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in |
2403 // the low kHashShift bits. | 2446 // the low kHashShift bits. |
2404 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); | 2447 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); |
2405 SmiTag(index, hash); | 2448 SmiTag(index, hash); |
2406 } | 2449 } |
2407 | 2450 |
2408 | 2451 |
2409 void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) { | 2452 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { |
2410 ASSERT(value.code() < 16); | |
2411 if (CpuFeatures::IsSupported(VFP3)) { | 2453 if (CpuFeatures::IsSupported(VFP3)) { |
2412 vmov(value.low(), smi); | 2454 vmov(value.low(), smi); |
2413 vcvt_f64_s32(value, 1); | 2455 vcvt_f64_s32(value, 1); |
2414 } else { | 2456 } else { |
2415 SmiUntag(ip, smi); | 2457 SmiUntag(ip, smi); |
2416 vmov(value.low(), ip); | 2458 vmov(value.low(), ip); |
2417 vcvt_f64_s32(value, value.low()); | 2459 vcvt_f64_s32(value, value.low()); |
2418 } | 2460 } |
2419 } | 2461 } |
2420 | 2462 |
2421 | 2463 |
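Reviewer note: the VFP3 path of SmiToDouble folds the untag into the conversion -- vcvt_f64_s32(value, 1) here is the fixed-point form with one fraction bit, so it divides by 2 while converting, exactly undoing the one-bit smi tag; without VFP3 the tag is shifted out through ip first. Modeled in plain C++ (illustrative only):

```cpp
// Fixed-point convert with f fraction bits == convert, then divide
// by 2^f. With f = 1 this is precisely the smi untag for free.
#include <cstdint>
#include <cstdio>

double FixedPointToDouble(int32_t raw, int fraction_bits) {
  return static_cast<double>(raw) / static_cast<double>(1 << fraction_bits);
}

int main() {
  int32_t tagged_smi = 42 << 1;  // Smi tag in the low bit.
  // One conversion does both the untag and the int->double step.
  std::printf("%f\n", FixedPointToDouble(tagged_smi, 1));  // 42.000000
}
```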
2422 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2464 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, |
2423 DwVfpRegister double_scratch) { | 2465 LowDwVfpRegister double_scratch) { |
2424 ASSERT(!double_input.is(double_scratch)); | 2466 ASSERT(!double_input.is(double_scratch)); |
2425 vcvt_s32_f64(double_scratch.low(), double_input); | 2467 vcvt_s32_f64(double_scratch.low(), double_input); |
2426 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2468 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2427 VFPCompareAndSetFlags(double_input, double_scratch); | 2469 VFPCompareAndSetFlags(double_input, double_scratch); |
2428 } | 2470 } |
2429 | 2471 |
2430 | 2472 |
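Reviewer note: TestDoubleIsInt32 is a round-trip test -- convert to int32 and back, then compare; equality holds exactly when the input was already an integral value in int32 range, and NaN fails because it compares unequal to everything. Roughly, in portable C++ (the explicit range guard stands in for vcvt's saturation, which a bare C++ cast does not provide):

```cpp
#include <cstdint>
#include <cstdio>

bool DoubleIsInt32(double d) {
  // Hardware vcvt_s32_f64 saturates out-of-range inputs; the portable
  // cast would be undefined there, so reject those (and NaN) up front.
  if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;
  int32_t i = static_cast<int32_t>(d);   // vcvt_s32_f64
  return static_cast<double>(i) == d;    // vcvt_f64_s32 + VFP compare
}

int main() {
  std::printf("%d %d %d\n",
              DoubleIsInt32(42.0),    // 1
              DoubleIsInt32(42.5),    // 0
              DoubleIsInt32(4e9));    // 0 (outside int32 range)
}
```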
2431 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2473 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2432 DwVfpRegister double_input, | 2474 DwVfpRegister double_input, |
2433 DwVfpRegister double_scratch) { | 2475 LowDwVfpRegister double_scratch) { |
2434 ASSERT(!double_input.is(double_scratch)); | 2476 ASSERT(!double_input.is(double_scratch)); |
2435 vcvt_s32_f64(double_scratch.low(), double_input); | 2477 vcvt_s32_f64(double_scratch.low(), double_input); |
2436 vmov(result, double_scratch.low()); | 2478 vmov(result, double_scratch.low()); |
2437 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2479 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2438 VFPCompareAndSetFlags(double_input, double_scratch); | 2480 VFPCompareAndSetFlags(double_input, double_scratch); |
2439 } | 2481 } |
2440 | 2482 |
2441 | 2483 |
2442 void MacroAssembler::TryInt32Floor(Register result, | 2484 void MacroAssembler::TryInt32Floor(Register result, |
2443 DwVfpRegister double_input, | 2485 DwVfpRegister double_input, |
2444 Register input_high, | 2486 Register input_high, |
2445 DwVfpRegister double_scratch, | 2487 LowDwVfpRegister double_scratch, |
2446 Label* done, | 2488 Label* done, |
2447 Label* exact) { | 2489 Label* exact) { |
2448 ASSERT(!result.is(input_high)); | 2490 ASSERT(!result.is(input_high)); |
2449 ASSERT(!double_input.is(double_scratch)); | 2491 ASSERT(!double_input.is(double_scratch)); |
2450 Label negative, exception; | 2492 Label negative, exception; |
2451 | 2493 |
| 2494 VmovHigh(input_high, double_input); |
| 2495 |
2452 // Test for NaN and infinities. | 2496 // Test for NaN and infinities. |
2453 Sbfx(result, input_high, | 2497 Sbfx(result, input_high, |
2454 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2498 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
2455 cmp(result, Operand(-1)); | 2499 cmp(result, Operand(-1)); |
2456 b(eq, &exception); | 2500 b(eq, &exception); |
2457 // Test for values that can be exactly represented as a | 2501 // Test for values that can be exactly represented as a |
2458 // signed 32-bit integer. | 2502 // signed 32-bit integer. |
2459 TryDoubleToInt32Exact(result, double_input, double_scratch); | 2503 TryDoubleToInt32Exact(result, double_input, double_scratch); |
2460 // If exact, return (result already fetched). | 2504 // If exact, return (result already fetched). |
2461 b(eq, exact); | 2505 b(eq, exact); |
(...skipping 20 matching lines...)
2482 b(mi, done); | 2526 b(mi, done); |
2483 bind(&exception); | 2527 bind(&exception); |
2484 } | 2528 } |
2485 | 2529 |
2486 | 2530 |
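Reviewer note: pulling VmovHigh(input_high, double_input) into TryInt32Floor makes it self-contained -- callers no longer have to pre-load the high word. The Sbfx that follows sign-extends the 11-bit exponent field out of that word; an all-ones field reads as -1 and flags NaN or an infinity. The same test in plain C++ (illustrative; assumes the usual arithmetic right shift on signed integers):

```cpp
// The high word of an IEEE-754 double is [sign | 11 exponent bits |
// 20 mantissa bits]; a sign-extended exponent of -1 means NaN or Inf.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

bool IsNaNOrInfinity(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);       // VmovHigh
  // Sbfx(result, high, 20, 11): signed bitfield extract of the exponent.
  int32_t exponent = static_cast<int32_t>(high << 1) >> 21;
  return exponent == -1;
}

int main() {
  std::printf("%d %d %d\n",
              IsNaNOrInfinity(std::numeric_limits<double>::infinity()),  // 1
              IsNaNOrInfinity(std::numeric_limits<double>::quiet_NaN()), // 1
              IsNaNOrInfinity(42.5));                                    // 0
}
```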
2487 void MacroAssembler::ECMAToInt32(Register result, | 2531 void MacroAssembler::ECMAToInt32(Register result, |
2488 DwVfpRegister double_input, | 2532 DwVfpRegister double_input, |
2489 Register scratch, | 2533 Register scratch, |
2490 Register scratch_high, | 2534 Register scratch_high, |
2491 Register scratch_low, | 2535 Register scratch_low, |
2492 DwVfpRegister double_scratch) { | 2536 LowDwVfpRegister double_scratch) { |
2493 ASSERT(!scratch_high.is(result)); | 2537 ASSERT(!scratch_high.is(result)); |
2494 ASSERT(!scratch_low.is(result)); | 2538 ASSERT(!scratch_low.is(result)); |
2495 ASSERT(!scratch_low.is(scratch_high)); | 2539 ASSERT(!scratch_low.is(scratch_high)); |
2496 ASSERT(!scratch.is(result) && | 2540 ASSERT(!scratch.is(result) && |
2497 !scratch.is(scratch_high) && | 2541 !scratch.is(scratch_high) && |
2498 !scratch.is(scratch_low)); | 2542 !scratch.is(scratch_low)); |
2499 ASSERT(!double_input.is(double_scratch)); | 2543 ASSERT(!double_input.is(double_scratch)); |
2500 | 2544 |
2501 Label out_of_range, only_low, negate, done; | 2545 Label out_of_range, only_low, negate, done; |
2502 | 2546 |
(...skipping 633 matching lines...)
3136 Label* gc_required) { | 3180 Label* gc_required) { |
3137 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); | 3181 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); |
3138 sub(scratch1, result, Operand(kHeapObjectTag)); | 3182 sub(scratch1, result, Operand(kHeapObjectTag)); |
3139 vstr(value, scratch1, HeapNumber::kValueOffset); | 3183 vstr(value, scratch1, HeapNumber::kValueOffset); |
3140 } | 3184 } |
3141 | 3185 |
3142 | 3186 |
3143 // Copies a fixed number of fields of heap objects from src to dst. | 3187 // Copies a fixed number of fields of heap objects from src to dst. |
3144 void MacroAssembler::CopyFields(Register dst, | 3188 void MacroAssembler::CopyFields(Register dst, |
3145 Register src, | 3189 Register src, |
3146 DwVfpRegister double_scratch, | 3190 LowDwVfpRegister double_scratch, |
3147 SwVfpRegister single_scratch, | |
3148 int field_count) { | 3191 int field_count) { |
3149 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize); | 3192 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize); |
3150 for (int i = 0; i < double_count; i++) { | 3193 for (int i = 0; i < double_count; i++) { |
3151 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes)); | 3194 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes)); |
3152 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes)); | 3195 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes)); |
3153 } | 3196 } |
3154 | 3197 |
3155 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize); | 3198 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize); |
3156 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes); | 3199 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes); |
3157 | 3200 |
3158 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize); | 3201 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize); |
3159 if (remain != 0) { | 3202 if (remain != 0) { |
3160 vldr(single_scratch, | 3203 vldr(double_scratch.low(), |
3161 FieldMemOperand(src, (field_count - 1) * kPointerSize)); | 3204 FieldMemOperand(src, (field_count - 1) * kPointerSize)); |
3162 vstr(single_scratch, | 3205 vstr(double_scratch.low(), |
3163 FieldMemOperand(dst, (field_count - 1) * kPointerSize)); | 3206 FieldMemOperand(dst, (field_count - 1) * kPointerSize)); |
3164 } | 3207 } |
3165 } | 3208 } |
3166 | 3209 |
3167 | 3210 |
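Reviewer note: dropping the SwVfpRegister parameter from CopyFields works because the scratch is now a LowDwVfpRegister, whose low() half is itself an S register -- one argument covers both the 64-bit bulk copies and an odd 32-bit tail field. The strategy restated in plain C++ (illustrative only):

```cpp
// Copy pointer-sized fields two at a time through a double register,
// then handle an odd trailing field with a single 32-bit transfer.
#include <cstdint>
#include <cstdio>
#include <cstring>

void CopyFields(uint32_t* dst, const uint32_t* src, int field_count) {
  int pairs = field_count / 2;                  // vldr/vstr d-reg copies
  std::memcpy(dst, src, pairs * 2 * sizeof(uint32_t));
  if (field_count % 2 != 0) {                   // 32-bit copy via .low()
    dst[field_count - 1] = src[field_count - 1];
  }
}

int main() {
  uint32_t src[5] = {1, 2, 3, 4, 5}, dst[5] = {0};
  CopyFields(dst, src, 5);
  std::printf("%u %u %u %u %u\n", dst[0], dst[1], dst[2], dst[3], dst[4]);
}
```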
3168 void MacroAssembler::CopyBytes(Register src, | 3211 void MacroAssembler::CopyBytes(Register src, |
3169 Register dst, | 3212 Register dst, |
3170 Register length, | 3213 Register length, |
3171 Register scratch) { | 3214 Register scratch) { |
3172 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; | 3215 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; |
(...skipping 480 matching lines...)
3653 } | 3696 } |
3654 | 3697 |
3655 | 3698 |
3656 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 3699 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
3657 Usat(output_reg, 8, Operand(input_reg)); | 3700 Usat(output_reg, 8, Operand(input_reg)); |
3658 } | 3701 } |
3659 | 3702 |
3660 | 3703 |
3661 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 3704 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
3662 DwVfpRegister input_reg, | 3705 DwVfpRegister input_reg, |
3663 DwVfpRegister temp_double_reg) { | 3706 LowDwVfpRegister temp_double_reg) { |
3664 Label above_zero; | 3707 Label above_zero; |
3665 Label done; | 3708 Label done; |
3666 Label in_bounds; | 3709 Label in_bounds; |
3667 | 3710 |
3668 Vmov(temp_double_reg, 0.0); | 3711 VFPCompareAndSetFlags(input_reg, 0.0); |
3669 VFPCompareAndSetFlags(input_reg, temp_double_reg); | |
3670 b(gt, &above_zero); | 3712 b(gt, &above_zero); |
3671 | 3713 |
3672 // Double value is less than zero, NaN or Inf, return 0. | 3714 // Double value is less than zero, NaN or Inf, return 0. |
3673 mov(result_reg, Operand::Zero()); | 3715 mov(result_reg, Operand::Zero()); |
3674 b(al, &done); | 3716 b(al, &done); |
3675 | 3717 |
3676 // Double value is >= 255, return 255. | 3718 // Double value is >= 255, return 255. |
3677 bind(&above_zero); | 3719 bind(&above_zero); |
3678 Vmov(temp_double_reg, 255.0, result_reg); | 3720 Vmov(temp_double_reg, 255.0, result_reg); |
3679 VFPCompareAndSetFlags(input_reg, temp_double_reg); | 3721 VFPCompareAndSetFlags(input_reg, temp_double_reg); |
3680 b(le, &in_bounds); | 3722 b(le, &in_bounds); |
3681 mov(result_reg, Operand(255)); | 3723 mov(result_reg, Operand(255)); |
3682 b(al, &done); | 3724 b(al, &done); |
3683 | 3725 |
3684 // In 0-255 range, round and truncate. | 3726 // In 0-255 range, round and truncate. |
3685 bind(&in_bounds); | 3727 bind(&in_bounds); |
3686 // Save FPSCR. | 3728 // Save FPSCR. |
3687 vmrs(ip); | 3729 vmrs(ip); |
3688 // Set rounding mode to round to the nearest integer by clearing bits[23:22]. | 3730 // Set rounding mode to round to the nearest integer by clearing bits[23:22]. |
3689 bic(result_reg, ip, Operand(kVFPRoundingModeMask)); | 3731 bic(result_reg, ip, Operand(kVFPRoundingModeMask)); |
3690 vmsr(result_reg); | 3732 vmsr(result_reg); |
3691 vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding); | 3733 vcvt_s32_f64(temp_double_reg.low(), input_reg, kFPSCRRounding); |
3692 vmov(result_reg, input_reg.low()); | 3734 vmov(result_reg, temp_double_reg.low()); |
3693 // Restore FPSCR. | 3735 // Restore FPSCR. |
3694 vmsr(ip); | 3736 vmsr(ip); |
3695 bind(&done); | 3737 bind(&done); |
3696 } | 3738 } |
3697 | 3739 |
3698 | 3740 |
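Reviewer note: two changes above -- the zero comparison switches to the immediate form of VFPCompareAndSetFlags instead of materializing 0.0 in the scratch, and the integer staging moves from input_reg.low() to temp_double_reg.low(), since input_reg may now be any of d0-d31 while only the Low scratch is guaranteed an S-register half. The clamp's overall semantics in plain C++ (a sketch; std::lrint stands in for the temporary FPSCR round-to-nearest setting):

```cpp
// Clamp a double to [0, 255]: NaN and non-positive values go to 0,
// values above 255 saturate, and in-range values round to nearest.
#include <cmath>
#include <cstdint>
#include <cstdio>

uint8_t ClampDoubleToUint8(double d) {
  if (!(d > 0.0)) return 0;    // NaN and d <= 0 both land here.
  if (d > 255.0) return 255;
  return static_cast<uint8_t>(std::lrint(d));  // Round to nearest (even).
}

int main() {
  unsigned a = ClampDoubleToUint8(-3.5);           // 0
  unsigned b = ClampDoubleToUint8(300.0);          // 255
  unsigned c = ClampDoubleToUint8(127.5);          // 128 (ties to even)
  unsigned n = ClampDoubleToUint8(std::nan(""));   // 0
  std::printf("%u %u %u %u\n", a, b, c, n);
}
```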
3699 void MacroAssembler::LoadInstanceDescriptors(Register map, | 3741 void MacroAssembler::LoadInstanceDescriptors(Register map, |
3700 Register descriptors) { | 3742 Register descriptors) { |
3701 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 3743 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
3702 } | 3744 } |
(...skipping 131 matching lines...)
3834 void CodePatcher::EmitCondition(Condition cond) { | 3876 void CodePatcher::EmitCondition(Condition cond) { |
3835 Instr instr = Assembler::instr_at(masm_.pc_); | 3877 Instr instr = Assembler::instr_at(masm_.pc_); |
3836 instr = (instr & ~kCondMask) | cond; | 3878 instr = (instr & ~kCondMask) | cond; |
3837 masm_.emit(instr); | 3879 masm_.emit(instr); |
3838 } | 3880 } |
3839 | 3881 |
3840 | 3882 |
3841 } } // namespace v8::internal | 3883 } } // namespace v8::internal |
3842 | 3884 |
3843 #endif // V8_TARGET_ARCH_ARM | 3885 #endif // V8_TARGET_ARCH_ARM |