| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 254 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 265 | 265 |
| 266 | 266 |
| 267 // Spare buffer. | 267 // Spare buffer. |
| 268 static const int kMinimalBufferSize = 4*KB; | 268 static const int kMinimalBufferSize = 4*KB; |
| 269 | 269 |
| 270 | 270 |
| 271 Assembler::Assembler(void* buffer, int buffer_size) | 271 Assembler::Assembler(void* buffer, int buffer_size) |
| 272 : positions_recorder_(this), | 272 : positions_recorder_(this), |
| 273 allow_peephole_optimization_(false) { | 273 allow_peephole_optimization_(false) { |
| 274 Isolate* isolate = Isolate::Current(); | 274 Isolate* isolate = Isolate::Current(); |
| 275 // BUG(3245989): disable peephole optimization if crankshaft is enabled. | |
| 276 allow_peephole_optimization_ = FLAG_peephole_optimization; | 275 allow_peephole_optimization_ = FLAG_peephole_optimization; |
| 277 if (buffer == NULL) { | 276 if (buffer == NULL) { |
| 278 // Do our own buffer management. | 277 // Do our own buffer management. |
| 279 if (buffer_size <= kMinimalBufferSize) { | 278 if (buffer_size <= kMinimalBufferSize) { |
| 280 buffer_size = kMinimalBufferSize; | 279 buffer_size = kMinimalBufferSize; |
| 281 | 280 |
| 282 if (isolate->assembler_spare_buffer() != NULL) { | 281 if (isolate->assembler_spare_buffer() != NULL) { |
| 283 buffer = isolate->assembler_spare_buffer(); | 282 buffer = isolate->assembler_spare_buffer(); |
| 284 isolate->set_assembler_spare_buffer(NULL); | 283 isolate->set_assembler_spare_buffer(NULL); |
| 285 } | 284 } |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 347 } | 346 } |
| 348 } | 347 } |
| 349 | 348 |
| 350 | 349 |
| 351 void Assembler::CodeTargetAlign() { | 350 void Assembler::CodeTargetAlign() { |
| 352 // Preferred alignment of jump targets on some ARM chips. | 351 // Preferred alignment of jump targets on some ARM chips. |
| 353 Align(8); | 352 Align(8); |
| 354 } | 353 } |
| 355 | 354 |
| 356 | 355 |
| 356 Condition Assembler::GetCondition(Instr instr) { |
| 357 return Instruction::ConditionField(instr); |
| 358 } |
| 359 |
| 360 |
| 357 bool Assembler::IsBranch(Instr instr) { | 361 bool Assembler::IsBranch(Instr instr) { |
| 358 return (instr & (B27 | B25)) == (B27 | B25); | 362 return (instr & (B27 | B25)) == (B27 | B25); |
| 359 } | 363 } |
| 360 | 364 |
| 361 | 365 |
| 362 int Assembler::GetBranchOffset(Instr instr) { | 366 int Assembler::GetBranchOffset(Instr instr) { |
| 363 ASSERT(IsBranch(instr)); | 367 ASSERT(IsBranch(instr)); |
| 364 // Take the jump offset in the lower 24 bits, sign extend it and multiply it | 368 // Take the jump offset in the lower 24 bits, sign extend it and multiply it |
| 365 // with 4 to get the offset in bytes. | 369 // with 4 to get the offset in bytes. |
| 366 return ((instr & kImm24Mask) << 8) >> 6; | 370 return ((instr & kImm24Mask) << 8) >> 6; |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 423 } | 427 } |
| 424 | 428 |
| 425 | 429 |
| 426 Register Assembler::GetRd(Instr instr) { | 430 Register Assembler::GetRd(Instr instr) { |
| 427 Register reg; | 431 Register reg; |
| 428 reg.code_ = Instruction::RdValue(instr); | 432 reg.code_ = Instruction::RdValue(instr); |
| 429 return reg; | 433 return reg; |
| 430 } | 434 } |
| 431 | 435 |
| 432 | 436 |
| 437 Register Assembler::GetRn(Instr instr) { |
| 438 Register reg; |
| 439 reg.code_ = Instruction::RnValue(instr); |
| 440 return reg; |
| 441 } |
| 442 |
| 443 |
| 444 Register Assembler::GetRm(Instr instr) { |
| 445 Register reg; |
| 446 reg.code_ = Instruction::RmValue(instr); |
| 447 return reg; |
| 448 } |
| 449 |
| 450 |
| 433 bool Assembler::IsPush(Instr instr) { | 451 bool Assembler::IsPush(Instr instr) { |
| 434 return ((instr & ~kRdMask) == kPushRegPattern); | 452 return ((instr & ~kRdMask) == kPushRegPattern); |
| 435 } | 453 } |
| 436 | 454 |
| 437 | 455 |
| 438 bool Assembler::IsPop(Instr instr) { | 456 bool Assembler::IsPop(Instr instr) { |
| 439 return ((instr & ~kRdMask) == kPopRegPattern); | 457 return ((instr & ~kRdMask) == kPopRegPattern); |
| 440 } | 458 } |
| 441 | 459 |
| 442 | 460 |
| (...skipping 17 matching lines...) Expand all Loading... |
| 460 } | 478 } |
| 461 | 479 |
| 462 | 480 |
| 463 bool Assembler::IsLdrPcImmediateOffset(Instr instr) { | 481 bool Assembler::IsLdrPcImmediateOffset(Instr instr) { |
| 464 // Check the instruction is indeed a | 482 // Check the instruction is indeed a |
| 465 // ldr<cond> <Rd>, [pc +/- offset_12]. | 483 // ldr<cond> <Rd>, [pc +/- offset_12]. |
| 466 return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000; | 484 return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000; |
| 467 } | 485 } |
| 468 | 486 |
| 469 | 487 |
| 488 bool Assembler::IsTstImmediate(Instr instr) { |
| 489 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == |
| 490 (I | TST | S); |
| 491 } |
| 492 |
| 493 |
| 494 bool Assembler::IsCmpRegister(Instr instr) { |
| 495 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) == |
| 496 (CMP | S); |
| 497 } |
| 498 |
| 499 |
| 500 bool Assembler::IsCmpImmediate(Instr instr) { |
| 501 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == |
| 502 (I | CMP | S); |
| 503 } |
| 504 |
| 505 |
| 506 Register Assembler::GetCmpImmediateRegister(Instr instr) { |
| 507 ASSERT(IsCmpImmediate(instr)); |
| 508 return GetRn(instr); |
| 509 } |
| 510 |
| 511 |
| 512 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { |
| 513 ASSERT(IsCmpImmediate(instr)); |
| 514 return instr & kOff12Mask; |
| 515 } |
| 516 |
| 470 // Labels refer to positions in the (to be) generated code. | 517 // Labels refer to positions in the (to be) generated code. |
| 471 // There are bound, linked, and unused labels. | 518 // There are bound, linked, and unused labels. |
| 472 // | 519 // |
| 473 // Bound labels refer to known positions in the already | 520 // Bound labels refer to known positions in the already |
| 474 // generated code. pos() is the position the label refers to. | 521 // generated code. pos() is the position the label refers to. |
| 475 // | 522 // |
| 476 // Linked labels refer to unknown positions in the code | 523 // Linked labels refer to unknown positions in the code |
| 477 // to be generated; pos() is the position of the last | 524 // to be generated; pos() is the position of the last |
| 478 // instruction using the label. | 525 // instruction using the label. |
| 479 | 526 |
| (...skipping 568 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1048 void Assembler::teq(Register src1, const Operand& src2, Condition cond) { | 1095 void Assembler::teq(Register src1, const Operand& src2, Condition cond) { |
| 1049 addrmod1(cond | TEQ | S, src1, r0, src2); | 1096 addrmod1(cond | TEQ | S, src1, r0, src2); |
| 1050 } | 1097 } |
| 1051 | 1098 |
| 1052 | 1099 |
| 1053 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { | 1100 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { |
| 1054 addrmod1(cond | CMP | S, src1, r0, src2); | 1101 addrmod1(cond | CMP | S, src1, r0, src2); |
| 1055 } | 1102 } |
| 1056 | 1103 |
| 1057 | 1104 |
| 1105 void Assembler::cmp_raw_immediate( |
| 1106 Register src, int raw_immediate, Condition cond) { |
| 1107 ASSERT(is_uint12(raw_immediate)); |
| 1108 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate); |
| 1109 } |
| 1110 |
| 1111 |
| 1058 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { | 1112 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { |
| 1059 addrmod1(cond | CMN | S, src1, r0, src2); | 1113 addrmod1(cond | CMN | S, src1, r0, src2); |
| 1060 } | 1114 } |
| 1061 | 1115 |
| 1062 | 1116 |
| 1063 void Assembler::orr(Register dst, Register src1, const Operand& src2, | 1117 void Assembler::orr(Register dst, Register src1, const Operand& src2, |
| 1064 SBit s, Condition cond) { | 1118 SBit s, Condition cond) { |
| 1065 addrmod1(cond | ORR | s, src1, dst, src2); | 1119 addrmod1(cond | ORR | s, src1, dst, src2); |
| 1066 } | 1120 } |
| 1067 | 1121 |
| (...skipping 722 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1790 // Ddst = MEM(Rbase + offset). | 1844 // Ddst = MEM(Rbase + offset). |
| 1791 // Instruction details available in ARM DDI 0406A, A8-628. | 1845 // Instruction details available in ARM DDI 0406A, A8-628. |
| 1792 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | | 1846 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | |
| 1793 // Vdst(15-12) | 1011(11-8) | offset | 1847 // Vdst(15-12) | 1011(11-8) | offset |
| 1794 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); | 1848 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); |
| 1795 int u = 1; | 1849 int u = 1; |
| 1796 if (offset < 0) { | 1850 if (offset < 0) { |
| 1797 offset = -offset; | 1851 offset = -offset; |
| 1798 u = 0; | 1852 u = 0; |
| 1799 } | 1853 } |
| 1800 ASSERT(offset % 4 == 0); | 1854 |
| 1801 ASSERT((offset / 4) < 256); | |
| 1802 ASSERT(offset >= 0); | 1855 ASSERT(offset >= 0); |
| 1803 emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | | 1856 if ((offset % 4) == 0 && (offset / 4) < 256) { |
| 1804 0xB*B8 | ((offset / 4) & 255)); | 1857 emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | |
| 1858 0xB*B8 | ((offset / 4) & 255)); |
| 1859 } else { |
| 1860 // Larger offsets must be handled by computing the correct address |
| 1861 // in the ip register. |
| 1862 ASSERT(!base.is(ip)); |
| 1863 if (u == 1) { |
| 1864 add(ip, base, Operand(offset)); |
| 1865 } else { |
| 1866 sub(ip, base, Operand(offset)); |
| 1867 } |
| 1868 emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); |
| 1869 } |
| 1870 } |
| 1871 |
| 1872 |
| 1873 void Assembler::vldr(const DwVfpRegister dst, |
| 1874 const MemOperand& operand, |
| 1875 const Condition cond) { |
| 1876 ASSERT(!operand.rm().is_valid()); |
| 1877 ASSERT(operand.am_ == Offset); |
| 1878 vldr(dst, operand.rn(), operand.offset(), cond); |
| 1805 } | 1879 } |
| 1806 | 1880 |
| 1807 | 1881 |
| 1808 void Assembler::vldr(const SwVfpRegister dst, | 1882 void Assembler::vldr(const SwVfpRegister dst, |
| 1809 const Register base, | 1883 const Register base, |
| 1810 int offset, | 1884 int offset, |
| 1811 const Condition cond) { | 1885 const Condition cond) { |
| 1812 // Sdst = MEM(Rbase + offset). | 1886 // Sdst = MEM(Rbase + offset). |
| 1813 // Instruction details available in ARM DDI 0406A, A8-628. | 1887 // Instruction details available in ARM DDI 0406A, A8-628. |
| 1814 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | | 1888 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | |
| 1815 // Vdst(15-12) | 1010(11-8) | offset | 1889 // Vdst(15-12) | 1010(11-8) | offset |
| 1816 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); | 1890 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); |
| 1817 int u = 1; | 1891 int u = 1; |
| 1818 if (offset < 0) { | 1892 if (offset < 0) { |
| 1819 offset = -offset; | 1893 offset = -offset; |
| 1820 u = 0; | 1894 u = 0; |
| 1821 } | 1895 } |
| 1822 ASSERT(offset % 4 == 0); | |
| 1823 ASSERT((offset / 4) < 256); | |
| 1824 ASSERT(offset >= 0); | |
| 1825 int sd, d; | 1896 int sd, d; |
| 1826 dst.split_code(&sd, &d); | 1897 dst.split_code(&sd, &d); |
| 1898 ASSERT(offset >= 0); |
| 1899 |
| 1900 if ((offset % 4) == 0 && (offset / 4) < 256) { |
| 1827 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | | 1901 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | |
| 1828 0xA*B8 | ((offset / 4) & 255)); | 1902 0xA*B8 | ((offset / 4) & 255)); |
| 1903 } else { |
| 1904 // Larger offsets must be handled by computing the correct address |
| 1905 // in the ip register. |
| 1906 ASSERT(!base.is(ip)); |
| 1907 if (u == 1) { |
| 1908 add(ip, base, Operand(offset)); |
| 1909 } else { |
| 1910 sub(ip, base, Operand(offset)); |
| 1911 } |
| 1912 emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); |
| 1913 } |
| 1914 } |
| 1915 |
| 1916 |
| 1917 void Assembler::vldr(const SwVfpRegister dst, |
| 1918 const MemOperand& operand, |
| 1919 const Condition cond) { |
| 1920 ASSERT(!operand.rm().is_valid()); |
| 1921 ASSERT(operand.am_ == Offset); |
| 1922 vldr(dst, operand.rn(), operand.offset(), cond); |
| 1829 } | 1923 } |
| 1830 | 1924 |
| 1831 | 1925 |
| 1832 void Assembler::vstr(const DwVfpRegister src, | 1926 void Assembler::vstr(const DwVfpRegister src, |
| 1833 const Register base, | 1927 const Register base, |
| 1834 int offset, | 1928 int offset, |
| 1835 const Condition cond) { | 1929 const Condition cond) { |
| 1836 // MEM(Rbase + offset) = Dsrc. | 1930 // MEM(Rbase + offset) = Dsrc. |
| 1837 // Instruction details available in ARM DDI 0406A, A8-786. | 1931 // Instruction details available in ARM DDI 0406A, A8-786. |
| 1838 // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) | | 1932 // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) | |
| 1839 // Vsrc(15-12) | 1011(11-8) | (offset/4) | 1933 // Vsrc(15-12) | 1011(11-8) | (offset/4) |
| 1840 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); | 1934 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); |
| 1841 int u = 1; | 1935 int u = 1; |
| 1842 if (offset < 0) { | 1936 if (offset < 0) { |
| 1843 offset = -offset; | 1937 offset = -offset; |
| 1844 u = 0; | 1938 u = 0; |
| 1845 } | 1939 } |
| 1846 ASSERT(offset % 4 == 0); | |
| 1847 ASSERT((offset / 4) < 256); | |
| 1848 ASSERT(offset >= 0); | 1940 ASSERT(offset >= 0); |
| 1849 emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | | 1941 if ((offset % 4) == 0 && (offset / 4) < 256) { |
| 1850 0xB*B8 | ((offset / 4) & 255)); | 1942 emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | |
| 1943 0xB*B8 | ((offset / 4) & 255)); |
| 1944 } else { |
| 1945 // Larger offsets must be handled by computing the correct address |
| 1946 // in the ip register. |
| 1947 ASSERT(!base.is(ip)); |
| 1948 if (u == 1) { |
| 1949 add(ip, base, Operand(offset)); |
| 1950 } else { |
| 1951 sub(ip, base, Operand(offset)); |
| 1952 } |
| 1953 emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); |
| 1954 } |
| 1955 } |
| 1956 |
| 1957 |
| 1958 void Assembler::vstr(const DwVfpRegister src, |
| 1959 const MemOperand& operand, |
| 1960 const Condition cond) { |
| 1961 ASSERT(!operand.rm().is_valid()); |
| 1962 ASSERT(operand.am_ == Offset); |
| 1963 vstr(src, operand.rn(), operand.offset(), cond); |
| 1851 } | 1964 } |
| 1852 | 1965 |
| 1853 | 1966 |
| 1854 void Assembler::vstr(const SwVfpRegister src, | 1967 void Assembler::vstr(const SwVfpRegister src, |
| 1855 const Register base, | 1968 const Register base, |
| 1856 int offset, | 1969 int offset, |
| 1857 const Condition cond) { | 1970 const Condition cond) { |
| 1858 // MEM(Rbase + offset) = SSrc. | 1971 // MEM(Rbase + offset) = SSrc. |
| 1859 // Instruction details available in ARM DDI 0406A, A8-786. | 1972 // Instruction details available in ARM DDI 0406A, A8-786. |
| 1860 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | | 1973 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | |
| 1861 // Vdst(15-12) | 1010(11-8) | (offset/4) | 1974 // Vdst(15-12) | 1010(11-8) | (offset/4) |
| 1862 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); | 1975 ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3)); |
| 1863 int u = 1; | 1976 int u = 1; |
| 1864 if (offset < 0) { | 1977 if (offset < 0) { |
| 1865 offset = -offset; | 1978 offset = -offset; |
| 1866 u = 0; | 1979 u = 0; |
| 1867 } | 1980 } |
| 1868 ASSERT(offset % 4 == 0); | |
| 1869 ASSERT((offset / 4) < 256); | |
| 1870 ASSERT(offset >= 0); | |
| 1871 int sd, d; | 1981 int sd, d; |
| 1872 src.split_code(&sd, &d); | 1982 src.split_code(&sd, &d); |
| 1873 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | | 1983 ASSERT(offset >= 0); |
| 1874 0xA*B8 | ((offset / 4) & 255)); | 1984 if ((offset % 4) == 0 && (offset / 4) < 256) { |
| 1985 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | |
| 1986 0xA*B8 | ((offset / 4) & 255)); |
| 1987 } else { |
| 1988 // Larger offsets must be handled by computing the correct address |
| 1989 // in the ip register. |
| 1990 ASSERT(!base.is(ip)); |
| 1991 if (u == 1) { |
| 1992 add(ip, base, Operand(offset)); |
| 1993 } else { |
| 1994 sub(ip, base, Operand(offset)); |
| 1995 } |
| 1996 emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); |
| 1997 } |
| 1998 } |
| 1999 |
| 2000 |
| 2001 void Assembler::vstr(const SwVfpRegister src, |
| 2002 const MemOperand& operand, |
| 2003 const Condition cond) { |
| 2004 ASSERT(!operand.rm().is_valid()); |
| 2005 ASSERT(operand.am_ == Offset); |
| 2006 vstr(src, operand.rn(), operand.offset(), cond); |
| 1875 } | 2007 } |
| 1876 | 2008 |
| 1877 | 2009 |
| 1878 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { | 2010 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { |
| 1879 uint64_t i; | 2011 uint64_t i; |
| 1880 memcpy(&i, &d, 8); | 2012 memcpy(&i, &d, 8); |
| 1881 | 2013 |
| 1882 *lo = i & 0xffffffff; | 2014 *lo = i & 0xffffffff; |
| 1883 *hi = i >> 32; | 2015 *hi = i >> 32; |
| 1884 } | 2016 } |
| (...skipping 474 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2359 | 2491 |
| 2360 // Pseudo instructions. | 2492 // Pseudo instructions. |
| 2361 void Assembler::nop(int type) { | 2493 void Assembler::nop(int type) { |
| 2362 // This is mov rx, rx. | 2494 // This is mov rx, rx. |
| 2363 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. | 2495 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. |
| 2364 emit(al | 13*B21 | type*B12 | type); | 2496 emit(al | 13*B21 | type*B12 | type); |
| 2365 } | 2497 } |
| 2366 | 2498 |
| 2367 | 2499 |
| 2368 bool Assembler::IsNop(Instr instr, int type) { | 2500 bool Assembler::IsNop(Instr instr, int type) { |
| 2369 // Check for mov rx, rx. | 2501 // Check for mov rx, rx where x = type. |
| 2370 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. | 2502 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. |
| 2371 return instr == (al | 13*B21 | type*B12 | type); | 2503 return instr == (al | 13*B21 | type*B12 | type); |
| 2372 } | 2504 } |
| 2373 | 2505 |
| 2374 | 2506 |
| 2375 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { | 2507 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { |
| 2376 uint32_t dummy1; | 2508 uint32_t dummy1; |
| 2377 uint32_t dummy2; | 2509 uint32_t dummy2; |
| 2378 return fits_shifter(imm32, &dummy1, &dummy2, NULL); | 2510 return fits_shifter(imm32, &dummy1, &dummy2, NULL); |
| 2379 } | 2511 } |
| (...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2618 | 2750 |
| 2619 // Since a constant pool was just emitted, move the check offset forward by | 2751 // Since a constant pool was just emitted, move the check offset forward by |
| 2620 // the standard interval. | 2752 // the standard interval. |
| 2621 next_buffer_check_ = pc_offset() + kCheckConstInterval; | 2753 next_buffer_check_ = pc_offset() + kCheckConstInterval; |
| 2622 } | 2754 } |
| 2623 | 2755 |
| 2624 | 2756 |
| 2625 } } // namespace v8::internal | 2757 } } // namespace v8::internal |
| 2626 | 2758 |
| 2627 #endif // V8_TARGET_ARCH_ARM | 2759 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |