Chromium Code Reviews

Diff: src/arm/macro-assembler-arm.cc

Issue 6594009: Implement int32 TypeRecordingBinaryOp on ARM. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 10 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 253 matching lines...)
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(lsb < 32);
Søren Thygesen Gjesse 2011/02/28 09:54:32: ASSERT lsb >= 0. ASSERT on width and width + lsb as well.
Søren Thygesen Gjesse 2011/03/02 09:33:08: Done.
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}

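The pre-ARMv7 branch above emulates the ARMv7 bfi (bit field insert) instruction: clear bits [lsb, lsb + width) of dst with bic, mask off the low width bits of src, shift them into place with LSL, and orr them in. A minimal standalone sketch of the same computation on plain integers, including the bounds checks the review comment above asks for (the function name and exact assert forms are assumptions for illustration, not the final patch set's code):

#include <cassert>
#include <cstdint>

// Returns dst with bits [lsb, lsb + width) replaced by the low `width` bits
// of src; all other bits of dst are preserved.
uint32_t BfiEmulated(uint32_t dst, uint32_t src, int lsb, int width) {
  assert(lsb >= 0 && lsb < 32);              // requested: ASSERT(lsb >= 0)
  assert(width >= 0 && lsb + width <= 32);   // requested: ASSERT on width and width + lsb
  if (width == 0) return dst;
  uint32_t low_mask = (width < 32) ? ((1u << width) - 1) : ~0u;
  uint32_t field_mask = low_mask << lsb;     // same mask the code above computes
  dst &= ~field_mask;                        // bic(dst, dst, Operand(mask))
  uint32_t field = (src & low_mask) << lsb;  // and_(...) followed by mov(..., LSL, lsb)
  return dst | field;                        // orr(dst, dst, scratch)
}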
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}

(...skipping 1527 matching lines...)
    // This code is faster for doubles that are in the ranges -0x7fffffff to
    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
    // the range of signed int32 values that are not Smis. Jumps to the label
    // 'not_int32' if the double isn't in the range -0x80000000.0 to
    // 0x80000000.0 (excluding the endpoints).
    Label right_exponent, done;
    // Get exponent word.
    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
    // Get exponent alone in scratch2.
    Ubfx(scratch2,
         scratch,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // Load dest with zero. We use this either for the final shift or
    // for the answer.
    mov(dest, Operand(0, RelocInfo::NONE));
    // Check whether the exponent matches a 32 bit signed int that is not a Smi.
    // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
    // the exponent that we are fastest at and also the highest exponent we can
    // handle here.
    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
    // split it up to avoid a constant pool entry. You can't do that in general
(...skipping 42 matching lines...)
    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
    // Move down according to the exponent.
    mov(dest, Operand(scratch, LSR, dest));
    // Fix sign if sign bit was set.
    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
    bind(&done);
  }
}

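The exponent arithmetic in the comments above follows directly from the IEEE-754 double layout: the stored exponent is biased by HeapNumber::kExponentBias (1023), so a non-Smi integer of the form 1.xxx * 2^30 stores 1023 + 30 = 1053 = 0x41d. That constant spans 11 significant bits, so it cannot be encoded as ARM's 8-bit rotated data-processing immediate, which is why the code compares it in two steps instead of taking a constant pool entry. A tiny standalone check of the arithmetic (constants redeclared here for illustration only):

#include <cstdint>

constexpr uint32_t kExponentBias = 1023;                  // IEEE-754 double exponent bias
constexpr uint32_t kNonSmiExponent = kExponentBias + 30;  // value of the form 1.xxx * 2^30

static_assert(kNonSmiExponent == 0x41d, "matches the 0x41d mentioned in the comment above");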
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                                     SwVfpRegister result,
                                     DwVfpRegister double_input,
                                     Register scratch1,
                                     Register scratch2,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(CpuFeatures::IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  Register prev_fpscr = scratch1;
  Register scratch = scratch2;

  int32_t check_inexact_conversion =
      (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;

  // Set custom FPSCR:
  // - Set rounding mode.
  // - Clear vfp cumulative exception flags.
  // - Make sure Flush-to-zero mode control bit is unset.
  vmrs(prev_fpscr);
  bic(scratch,
      prev_fpscr,
      Operand(kVFPExceptionMask |
              check_inexact_conversion |
              kVFPRoundingModeMask |
              kVFPFlushToZeroMask));
  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
  if (rounding_mode != kRoundToNearest) {
    orr(scratch, scratch, Operand(rounding_mode));
  }
  vmsr(scratch);

  // Convert the argument to an integer.
  vcvt_s32_f64(result,
               double_input,
               (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
                                               : kFPSCRRounding);

  // Retrieve FPSCR.
  vmrs(scratch);
  // Restore FPSCR.
  vmsr(prev_fpscr);
  // Check for vfp exceptions.
  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
}

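The new EmitVFPTruncate leaves the relevant FPSCR exception bits in the CPU condition flags via the final tst, so a caller is expected to branch on ne to detect a failed (or, when requested, inexact) conversion. A hypothetical caller fragment, assumed to sit inside another MacroAssembler routine; the register choices and label name are illustrative, not code from this patch:

Label not_int32;
SwVfpRegister single_scratch = s15;   // will receive the converted integer bits
DwVfpRegister double_value = d7;      // the double to truncate
EmitVFPTruncate(kRoundToZero,
                single_scratch,
                double_value,
                r7,                   // scratch1: holds the saved FPSCR
                r9,                   // scratch2: working FPSCR value
                kCheckForInexactConversion);
b(ne, &not_int32);                    // the final tst set ne on any VFP exception
vmov(r0, single_scratch);             // move the truncated int32 into a core register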
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}
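On 32-bit ARM a Smi keeps its integer value shifted left by kSmiTagSize (1) with the tag in bit 0, so both paths above amount to "untag, then mask off the low bits". Expressed on plain integers (illustration only; the helper name is made up):

#include <cstdint>

// Equivalent of GetLeastBitsFromSmi applied to an already-loaded 32-bit Smi word.
int32_t LeastBitsFromSmi(int32_t smi, int num_least_bits) {
  int32_t value = smi >> 1;                    // ASR by kSmiTagSize drops the tag bit
  return value & ((1 << num_least_bits) - 1);  // keep only the low num_least_bits bits
}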
(...skipping 673 matching lines...)
void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}

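EmitCondition rewrites only the 4-bit condition field (bits 28..31) of the instruction word at the patch position and re-emits it otherwise unchanged. A standalone illustration of the bit manipulation; the mask value and example encodings come from the ARM instruction set, not from this patch:

#include <cstdint>

const uint32_t kCondMask = 0xF0000000u;  // condition field, bits 28..31

uint32_t PatchCondition(uint32_t instr, uint32_t cond_bits) {
  return (instr & ~kCondMask) | cond_bits;  // keep everything but the condition
}

// Example: 0xE3A00000 is "mov r0, #0" with condition AL (0xE in bits 28..31).
// PatchCondition(0xE3A00000u, 0x00000000u) yields 0x03A00000, i.e. "moveq r0, #0".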

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
