OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
6 | 6 |
7 #include <limits> | 7 #include <limits> |
8 | 8 |
9 #include "src/compilation-info.h" | 9 #include "src/compilation-info.h" |
10 #include "src/compiler/code-generator-impl.h" | 10 #include "src/compiler/code-generator-impl.h" |
(...skipping 2125 matching lines...) | |
2136 case kX64Poke: { | 2136 case kX64Poke: { |
2137 int const slot = MiscField::decode(instr->opcode()); | 2137 int const slot = MiscField::decode(instr->opcode()); |
2138 if (HasImmediateInput(instr, 0)) { | 2138 if (HasImmediateInput(instr, 0)) { |
2139 __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0)); | 2139 __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0)); |
2140 } else { | 2140 } else { |
2141 __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0)); | 2141 __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0)); |
2142 } | 2142 } |
2143 break; | 2143 break; |
2144 } | 2144 } |
2145 case kX64Int32x4Splat: { | 2145 case kX64Int32x4Splat: { |
2146 CpuFeatureScope sse_scope(masm(), SSE4_1); | |
2147 XMMRegister dst = i.OutputSimd128Register(); | 2146 XMMRegister dst = i.OutputSimd128Register(); |
2148 __ Movd(dst, i.InputRegister(0)); | 2147 __ movd(dst, i.InputRegister(0)); |
2149 __ shufps(dst, dst, 0x0); | 2148 __ pshufd(dst, dst, 0x0); |
2150 break; | 2149 break; |
2151 } | 2150 } |
2152 case kX64Int32x4ExtractLane: { | 2151 case kX64Int32x4ExtractLane: { |
2153 CpuFeatureScope sse_scope(masm(), SSE4_1); | 2152 CpuFeatureScope sse_scope(masm(), SSE4_1); |
2154 __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); | 2153 __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); |
2155 break; | 2154 break; |
2156 } | 2155 } |
2157 case kX64Int32x4ReplaceLane: { | 2156 case kX64Int32x4ReplaceLane: { |
2158 CpuFeatureScope sse_scope(masm(), SSE4_1); | 2157 CpuFeatureScope sse_scope(masm(), SSE4_1); |
2159 if (instr->InputAt(2)->IsRegister()) { | 2158 if (instr->InputAt(2)->IsRegister()) { |
2160 __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2), | 2159 __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2), |
2161 i.InputInt8(1)); | 2160 i.InputInt8(1)); |
2162 } else { | 2161 } else { |
2163 __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); | 2162 __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); |
2164 } | 2163 } |
2165 break; | 2164 break; |
2166 } | 2165 } |
2167 case kX64Int32x4Add: { | 2166 case kX64Int32x4Add: { |
2168 CpuFeatureScope sse_scope(masm(), SSE4_1); | |
2169 __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); | 2167 __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
2170 break; | 2168 break; |
2171 } | 2169 } |
2172 case kX64Int32x4Sub: { | 2170 case kX64Int32x4Sub: { |
2173 CpuFeatureScope sse_scope(masm(), SSE4_1); | |
2174 __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); | 2171 __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
2175 break; | 2172 break; |
2176 } | 2173 } |
| | 2174 case kX64Int32x4Mul: { |
| | 2175 CpuFeatureScope sse_scope(masm(), SSE4_1); |
bbudge, 2017/03/14 17:50:39:
Do all x64 chips support SSE4.1? This makes me won…
gdeepti, 2017/03/14 21:32:52:
Summarizing offline discussions, As SupportsSimd12…
(A standalone sketch of the SSE4.1 gating pattern under discussion follows the diff.)
| | 2176 __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2177 break; |
| | 2178 } |
| | 2179 case kX64Int32x4Min: { |
| | 2180 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| | 2181 __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2182 break; |
| | 2183 } |
| | 2184 case kX64Int32x4Max: { |
| | 2185 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| | 2186 __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2187 break; |
| | 2188 } |
| | 2189 case kX64Uint32x4Min: { |
| | 2190 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| | 2191 __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2192 break; |
| | 2193 } |
| | 2194 case kX64Uint32x4Max: { |
| | 2195 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| | 2196 __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2197 break; |
| | 2198 } |
| | 2199 case kX64Int32x4Equal: { |
| | 2200 __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2201 break; |
| | 2202 } |
| | 2203 case kX64Int32x4NotEqual: { |
| | 2204 __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); |
| | 2205 __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); |
| | 2206 __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); |
| | 2207 break; |
| | 2208 } |
| | 2209 case kX64Int32x4ShiftLeftByScalar: { |
| | 2210 __ pslld(i.OutputSimd128Register(), i.InputInt8(1)); |
| | 2211 break; |
| | 2212 } |
| | 2213 case kX64Int32x4ShiftRightByScalar: { |
| | 2214 __ psrad(i.OutputSimd128Register(), i.InputInt8(1)); |
| | 2215 break; |
| | 2216 } |
| | 2217 case kX64Uint32x4ShiftRightByScalar: { |
| | 2218 __ psrld(i.OutputSimd128Register(), i.InputInt8(1)); |
| | 2219 break; |
| | 2220 } |
| | 2221 case kX64Simd32x4Select: { |
| | 2222 // Mask used here is stored in dst. |
| | 2223 XMMRegister dst = i.OutputSimd128Register(); |
| | 2224 __ movaps(kScratchDoubleReg, i.InputSimd128Register(1)); |
| | 2225 __ xorps(kScratchDoubleReg, i.InputSimd128Register(2)); |
| | 2226 __ andps(dst, kScratchDoubleReg); |
| | 2227 __ xorps(dst, i.InputSimd128Register(2)); |
| | 2228 break; |
| | 2229 } |
2177 case kX64Simd128Zero: { | 2230 case kX64Simd128Zero: { |
2178 CpuFeatureScope sse_scope(masm(), SSE4_1); | |
2179 XMMRegister dst = i.OutputSimd128Register(); | 2231 XMMRegister dst = i.OutputSimd128Register(); |
2180 __ xorps(dst, dst); | 2232 __ xorps(dst, dst); |
2181 break; | 2233 break; |
2182 } | 2234 } |
2183 case kCheckedLoadInt8: | 2235 case kCheckedLoadInt8: |
2184 ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl); | 2236 ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl); |
2185 break; | 2237 break; |
2186 case kCheckedLoadUint8: | 2238 case kCheckedLoadUint8: |
2187 ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl); | 2239 ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl); |
2188 break; | 2240 break; |
(...skipping 690 matching lines...) | |
2879 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; | 2931 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
2880 __ Nop(padding_size); | 2932 __ Nop(padding_size); |
2881 } | 2933 } |
2882 } | 2934 } |
2883 | 2935 |
2884 #undef __ | 2936 #undef __ |
2885 | 2937 |
2886 } // namespace compiler | 2938 } // namespace compiler |
2887 } // namespace internal | 2939 } // namespace internal |
2888 } // namespace v8 | 2940 } // namespace v8 |
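
The review thread above asks whether every x64 chip supports SSE4.1. In the patch itself, `CpuFeatureScope sse_scope(masm(), SSE4_1);` is kept only around the SSE4.1-only instructions (Pextrd/Pinsrd, pmulld, pminsd/pmaxsd, pminud/pmaxud) and dropped from the SSE2-level ones (movd/pshufd, paddd, psubd, pcmpeqd, the shifts, xorps), since SSE2 is part of the x86-64 baseline. The sketch below is a standalone illustration of that distinction, not V8 code; `HasSSE41`, `Mul32x4_SSE41`, and `Mul32x4_Fallback` are made-up names, and the GCC/Clang builtins used here are just one way to do the runtime check.

```cpp
// Standalone sketch, GCC/Clang on x86-64 assumed. Not V8 code.
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

// paddd-level ops (SSE2) are guaranteed on x86-64; pmulld (SSE4.1) is not.
static bool HasSSE41() { return __builtin_cpu_supports("sse4.1"); }

// SSE4.1 path: packed 32-bit multiply, i.e. the pmulld used by kX64Int32x4Mul.
__attribute__((target("sse4.1")))
static __m128i Mul32x4_SSE41(__m128i a, __m128i b) {
  return _mm_mullo_epi32(a, b);
}

// Baseline fallback: do the four multiplies in scalar code.
static __m128i Mul32x4_Fallback(__m128i a, __m128i b) {
  alignas(16) int32_t x[4], y[4];
  _mm_store_si128(reinterpret_cast<__m128i*>(x), a);
  _mm_store_si128(reinterpret_cast<__m128i*>(y), b);
  for (int i = 0; i < 4; ++i) x[i] *= y[i];
  return _mm_load_si128(reinterpret_cast<__m128i*>(x));
}

int main() {
  __m128i a = _mm_set_epi32(4, 3, 2, 1);
  __m128i b = _mm_set_epi32(40, 30, 20, 10);
  __m128i sum = _mm_add_epi32(a, b);  // SSE2 (paddd): no feature check needed.
  __m128i prod = HasSSE41() ? Mul32x4_SSE41(a, b)   // SSE4.1 (pmulld), guarded.
                            : Mul32x4_Fallback(a, b);
  alignas(16) int32_t s[4], p[4];
  _mm_store_si128(reinterpret_cast<__m128i*>(s), sum);
  _mm_store_si128(reinterpret_cast<__m128i*>(p), prod);
  std::printf("sum  = %d %d %d %d\n", s[0], s[1], s[2], s[3]);
  std::printf("prod = %d %d %d %d\n", p[0], p[1], p[2], p[3]);
  return 0;
}
```

Built as a plain x86-64 binary (no -msse4.1), the SSE2 path always works, while the pmulld path is taken only when the runtime check passes.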
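The kX64Simd32x4Select case in the diff above builds a per-bit select out of three bitwise ops: with the mask already in dst, the sequence scratch = a ^ b; dst = mask & scratch; dst = dst ^ b; yields the bits of a where the mask is 1 and the bits of b where it is 0. A minimal standalone C++ sketch of that identity, one 32-bit lane at a time (`BitwiseSelect` is a made-up name for illustration, not V8 code):

```cpp
// Standalone sketch of the bitwise-select identity; not V8 code.
#include <cstdint>
#include <cstdio>

// One 32-bit lane: result takes a's bits where mask is 1, b's bits where it is 0.
static uint32_t BitwiseSelect(uint32_t mask, uint32_t a, uint32_t b) {
  uint32_t scratch = a ^ b;       // movaps scratch, a; xorps scratch, b
  uint32_t dst = mask & scratch;  // andps dst, scratch  (mask is already in dst)
  return dst ^ b;                 // xorps dst, b
}

int main() {
  std::printf("%08x\n", BitwiseSelect(0xFFFFFFFFu, 0x11111111u, 0x22222222u));  // 11111111
  std::printf("%08x\n", BitwiseSelect(0x00000000u, 0x11111111u, 0x22222222u));  // 22222222
  std::printf("%08x\n", BitwiseSelect(0x0000FFFFu, 0xAAAAAAAAu, 0x55555555u));  // 5555aaaa
  return 0;
}
```

One likely reason for this form: the variable-blend instructions (blendvps/pblendvb) require SSE4.1, while andps/xorps do not, so this case needs no CpuFeatureScope.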