Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(144)

Side by Side Diff: src/x64/assembler-x64.cc

Issue 146022: X64: Addition binary operation. (Closed)
Patch Set: Addressed review comments (and updated from svn) Created 11 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/x64/assembler-x64.h ('k') | src/x64/assembler-x64-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
106 } else if (is_int8(disp)) { 106 } else if (is_int8(disp)) {
107 set_modrm(1, rsp); 107 set_modrm(1, rsp);
108 set_disp8(disp); 108 set_disp8(disp);
109 } else { 109 } else {
110 set_modrm(2, rsp); 110 set_modrm(2, rsp);
111 set_disp32(disp); 111 set_disp32(disp);
112 } 112 }
113 } 113 }
114 114
115 115
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
// Because those are architecturally guaranteed, the supported set starts at
// kDefaultCpuFeatures rather than zero.
uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
// Features explicitly enabled (e.g. via CpuFeatures::Scope); none by default.
uint64_t CpuFeatures::enabled_ = 0;
120 120
// Detects CPU features at runtime by assembling a small probe stub into a
// code object, executing it, and storing the resulting feature bitmask in
// supported_. Must run after the heap is set up; does nothing when
// serializing, so snapshots never depend on the build machine's CPU.
void CpuFeatures::Probe() {
  ASSERT(Heap::HasBeenSetup());
  ASSERT(supported_ == kDefaultCpuFeatures);
  if (Serializer::enabled()) return;  // No features if we might serialize.

  Assembler assm(NULL, 0);
  Label cpuid, done;
#define __ assm.
  // Save old rsp, since we are going to modify the stack.
  __ push(rbp);
  __ pushfq();
  __ push(rcx);
  __ push(rbx);
  __ movq(rbp, rsp);

  // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
  __ pushfq();
  __ pop(rax);
  __ movq(rdx, rax);
  __ xor_(rax, Immediate(0x200000));  // Flip bit 21.
  __ push(rax);
  __ popfq();
  __ pushfq();
  __ pop(rax);
  __ xor_(rax, rdx);  // Different if CPUID is supported.
  __ j(not_zero, &cpuid);

  // CPUID not supported. Clear the supported features in edx:eax.
  __ xor_(rax, rax);
  __ jmp(&done);

  // Invoke CPUID with 1 in eax to get feature information in
  // ecx:edx. Temporarily enable CPUID support because we know it's
  // safe here.
  __ bind(&cpuid);
  __ movq(rax, Immediate(1));
  // Temporarily mark CPUID as supported so the cpuid() emitter's assert
  // passes while we assemble the stub; restored to the default right after.
  supported_ = kDefaultCpuFeatures | (1 << CPUID);
  { Scope fscope(CPUID);
    __ cpuid();
  }
  supported_ = kDefaultCpuFeatures;

  // Move the result from ecx:edx to rax and make sure to mark the
  // CPUID feature as supported.
  __ movl(rax, rdx);  // Zero-extended to 64 bits.
  __ shl(rcx, Immediate(32));
  __ or_(rax, rcx);
  __ or_(rax, Immediate(1 << CPUID));

  // Done.
  __ bind(&done);
  __ movq(rsp, rbp);
  __ pop(rbx);
  __ pop(rcx);
  __ popfq();
  __ pop(rbp);
  __ ret(0);
#undef __

  // Materialize the stub as a code object and call it through a function
  // pointer; its return value is the feature bitmask.
  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code =
      Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
  if (!code->IsCode()) return;  // Allocation failed; keep defaults.
  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
                      Code::cast(code), "CpuFeatures::Probe"));
  typedef uint64_t (*F0)();
  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
  supported_ = probe();
  // SSE2 and CMOV must be available on an X64 CPU.
  ASSERT(IsSupported(CPUID));
  ASSERT(IsSupported(SSE2));
  ASSERT(IsSupported(CMOV));
}
191 195
192 // ----------------------------------------------------------------------------- 196 // -----------------------------------------------------------------------------
193 // Implementation of Assembler 197 // Implementation of Assembler
194 198
195 #ifdef GENERATED_CODE_COVERAGE 199 #ifdef GENERATED_CODE_COVERAGE
196 static void InitCoverageLog(); 200 static void InitCoverageLog();
197 #endif 201 #endif
198 202
199 byte* Assembler::spare_buffer_ = NULL; 203 byte* Assembler::spare_buffer_ = NULL;
(...skipping 403 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Indirect call through a 64-bit memory operand.
void Assembler::call(const Operand& op) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // Opcode: FF /2 m64
  emit_rex_64(op);
  emit(0xFF);
  emit_operand(2, op);  // /2: opcode extension goes in the reg field.
}
611 615
612 616
// Conditional 64-bit register-to-register move: dst = src if cc holds.
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  // No need to check CpuInfo for CMOV support, it's a required part of the
  // 64-bit architecture.
  ASSERT(cc >= 0);  // Use mov for unconditional moves.
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // Opcode: REX.W 0f 40 + cc /r
  emit_rex_64(dst, src);
  emit(0x0f);
  emit(0x40 + cc);  // Condition code is folded into the opcode byte.
  emit_modrm(dst, src);
}
629
630
// Conditional 64-bit load from memory: dst = [src] if cc holds.
void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
  ASSERT(cc >= 0);  // Use mov for unconditional moves.
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // Opcode: REX.W 0f 40 + cc /r
  emit_rex_64(dst, src);
  emit(0x0f);
  emit(0x40 + cc);
  emit_operand(dst, src);
}
641
642
// Conditional 32-bit register-to-register move: dst = src if cc holds.
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  ASSERT(cc >= 0);  // Use mov for unconditional moves.
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // Opcode: 0f 40 + cc /r
  emit_optional_rex_32(dst, src);  // REX only when extended registers used.
  emit(0x0f);
  emit(0x40 + cc);
  emit_modrm(dst, src);
}
653
654
// Conditional 32-bit load from memory: dst = [src] if cc holds.
void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
  ASSERT(cc >= 0);  // Use mov for unconditional moves.
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // Opcode: 0f 40 + cc /r
  emit_optional_rex_32(dst, src);
  emit(0x0f);
  emit(0x40 + cc);
  emit_operand(dst, src);
}
665
666
667
// Emits CPUID (0F A2). Guarded by a feature check because callers must have
// verified (or temporarily asserted, as in CpuFeatures::Probe) that CPUID
// is available.
void Assembler::cpuid() {
  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0x0F);
  emit(0xA2);
}
620 675
621 676
622 void Assembler::cqo() { 677 void Assembler::cqo() {
(...skipping 1122 matching lines...) Expand 10 before | Expand all | Expand 10 after
1745 1800
1746 1801
// Emits FNCLEX (DB E2): clear x87 FPU exception flags.
void Assembler::fnclex() {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xDB);
  emit(0xE2);
}
1753 1808
1754 1809
// Emits SAHF (9E): store AH into the low byte of EFLAGS.
void Assembler::sahf() {
  // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
  // in 64-bit mode. Test CpuID.
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0x9E);
}
1817
1818
// Emits a two-byte x87 arithmetic instruction where the FPU stack slot i
// (0..7) is encoded into the second opcode byte.
void Assembler::emit_farith(int b1, int b2, int i) {
  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
  ASSERT(is_uint3(i));  // illegal stack offset
  emit(b1);
  emit(b2 + i);
}
1761 1825
1826 // SSE 2 operations
1827
// Store a scalar double from an XMM register to memory: [dst] = src.
void Assembler::movsd(const Operand& dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);  // double
  emit_optional_rex_32(src, dst);
  emit(0x0F);
  emit(0x11);  // store
  emit_sse_operand(src, dst);
}
1837
1838
// Register-form scalar double move using the store (0x11) encoding.
// NOTE(review): dst is typed Register but is encoded in the XMM r/m slot,
// so its code is presumably interpreted as an XMM register index here —
// confirm against callers.
void Assembler::movsd(Register dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);  // double
  emit_optional_rex_32(src, dst);
  emit(0x0F);
  emit(0x11);  // store
  emit_sse_operand(src, dst);
}
1848
1849
// Register-form scalar double move using the load (0x10) encoding.
// NOTE(review): src is typed Register but occupies the XMM r/m slot;
// presumably its code names an XMM register — confirm against callers.
void Assembler::movsd(XMMRegister dst, Register src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);  // double
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x10);  // load
  emit_sse_operand(dst, src);
}
1859
1860
// Load a scalar double from memory into an XMM register: dst = [src].
void Assembler::movsd(XMMRegister dst, const Operand& src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);  // double
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x10);  // load
  emit_sse_operand(dst, src);
}
1870
1871
// Truncating convert: scalar single in memory -> 32-bit integer register.
// Opcode: F3 0F 2C /r.
void Assembler::cvttss2si(Register dst, const Operand& src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF3);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x2C);
  emit_operand(dst, src);
}
1881
1882
// Truncating convert: scalar double in memory -> 32-bit integer register.
// Opcode: F2 0F 2C /r.
void Assembler::cvttsd2si(Register dst, const Operand& src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x2C);
  emit_operand(dst, src);
}
1892
1893
// Convert a 32-bit integer in memory to a scalar double in dst.
// Opcode: F2 0F 2A /r (no REX.W, so a 32-bit source).
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x2A);
  emit_sse_operand(dst, src);
}
1903
1904
// Convert a 32-bit integer register to a scalar double in dst.
// Opcode: F2 0F 2A /r (no REX.W, so a 32-bit source).
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x2A);
  emit_sse_operand(dst, src);
}
1914
1915
// Convert a 64-bit integer register to a scalar double in dst.
// Opcode: F2 REX.W 0F 2A /r — the mandatory REX.W selects a 64-bit source.
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_rex_64(dst, src);
  emit(0x0F);
  emit(0x2A);
  emit_sse_operand(dst, src);
}
1925
1926
// Scalar double add: dst = dst + src. Opcode: F2 0F 58 /r.
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x58);
  emit_sse_operand(dst, src);
}
1936
1937
// Scalar double multiply: dst = dst * src. Opcode: F2 0F 59 /r.
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x59);
  emit_sse_operand(dst, src);
}
1947
1948
// Scalar double subtract: dst = dst - src. Opcode: F2 0F 5C /r.
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x5C);
  emit_sse_operand(dst, src);
}
1958
1959
// Scalar double divide: dst = dst / src. Opcode: F2 0F 5E /r.
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit(0xF2);
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x5E);
  emit_sse_operand(dst, src);
}
1969
1970
1971
// Emits a memory operand for an SSE instruction by reusing the
// general-register operand encoder with the XMM register's code.
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
  Register ireg = { reg.code() };  // XMM code in the reg field of modrm.
  emit_operand(ireg, adr);
}
1976
1977
// Register-direct modrm byte (mod=11) for an XMM/XMM instruction.
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
  emit(0xC0 | (dst.code() << 3) | src.code());
}
1981
// Register-direct modrm byte (mod=11) for an XMM/general-register pair.
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
  emit(0xC0 | (dst.code() << 3) | src.code());
}
1985
1762 1986
1763 // Relocation information implementations 1987 // Relocation information implementations
1764 1988
1765 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { 1989 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
1766 ASSERT(rmode != RelocInfo::NONE); 1990 ASSERT(rmode != RelocInfo::NONE);
1767 // Don't record external references unless the heap will be serialized. 1991 // Don't record external references unless the heap will be serialized.
1768 if (rmode == RelocInfo::EXTERNAL_REFERENCE && 1992 if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
1769 !Serializer::enabled() && 1993 !Serializer::enabled() &&
1770 !FLAG_debug_code) { 1994 !FLAG_debug_code) {
1771 return; 1995 return;
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Debugger support is not yet ported to X64; always reports no break.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
  UNIMPLEMENTED();
  return false;
}
1869 2093
// Debugger support is not yet ported to X64.
void BreakLocationIterator::SetDebugBreakAtReturn() {
  UNIMPLEMENTED();
}
1873 2097
1874 } } // namespace v8::internal 2098 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/x64/assembler-x64.h ('k') | src/x64/assembler-x64-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698