| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
| 6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
| 7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
| 10 | 10 |
| (...skipping 74 matching lines...) |
| 85 } | 85 } |
| 86 | 86 |
| 87 | 87 |
| 88 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 88 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 89 ArmOperandGenerator g(selector); | 89 ArmOperandGenerator g(selector); |
| 90 selector->Emit(opcode, g.DefineAsRegister(node), | 90 selector->Emit(opcode, g.DefineAsRegister(node), |
| 91 g.UseRegister(node->InputAt(0)), | 91 g.UseRegister(node->InputAt(0)), |
| 92 g.UseRegister(node->InputAt(1))); | 92 g.UseRegister(node->InputAt(1))); |
| 93 } | 93 } |
| 94 | 94 |
| 95 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 96 ArmOperandGenerator g(selector); |
| 97 selector->Emit( |
| 98 opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), |
| 99 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); |
| 100 } |
| 101 |
| 95 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 102 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 96 ArmOperandGenerator g(selector); | 103 ArmOperandGenerator g(selector); |
| 97 int32_t imm = OpParameter<int32_t>(node); | 104 int32_t imm = OpParameter<int32_t>(node); |
| 98 selector->Emit(opcode, g.DefineAsRegister(node), | 105 selector->Emit(opcode, g.DefineAsRegister(node), |
| 99 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm)); | 106 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm)); |
| 100 } | 107 } |
| 101 | 108 |
| 102 void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 109 void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 103 ArmOperandGenerator g(selector); | 110 ArmOperandGenerator g(selector); |
| 104 int32_t imm = OpParameter<int32_t>(node); | 111 int32_t imm = OpParameter<int32_t>(node); |
| (...skipping 2055 matching lines...) |
| 2160 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); | 2167 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
| 2161 Emit(code, 0, nullptr, input_count, inputs); | 2168 Emit(code, 0, nullptr, input_count, inputs); |
| 2162 } | 2169 } |
| 2163 | 2170 |
| 2164 #define SIMD_TYPE_LIST(V) \ | 2171 #define SIMD_TYPE_LIST(V) \ |
| 2165 V(Float32x4) \ | 2172 V(Float32x4) \ |
| 2166 V(Int32x4) \ | 2173 V(Int32x4) \ |
| 2167 V(Int16x8) \ | 2174 V(Int16x8) \ |
| 2168 V(Int8x16) | 2175 V(Int8x16) |
| 2169 | 2176 |
| 2177 #define SIMD_FORMAT_LIST(V) \ |
| 2178 V(32x4) \ |
| 2179 V(16x8) \ |
| 2180 V(8x16) |
| 2181 |
| 2170 #define SIMD_UNOP_LIST(V) \ | 2182 #define SIMD_UNOP_LIST(V) \ |
| 2171 V(Float32x4FromInt32x4) \ | 2183 V(Float32x4FromInt32x4) \ |
| 2172 V(Float32x4FromUint32x4) \ | 2184 V(Float32x4FromUint32x4) \ |
| 2173 V(Float32x4Abs) \ | 2185 V(Float32x4Abs) \ |
| 2174 V(Float32x4Neg) \ | 2186 V(Float32x4Neg) \ |
| 2175 V(Int32x4FromFloat32x4) \ | 2187 V(Int32x4FromFloat32x4) \ |
| 2176 V(Uint32x4FromFloat32x4) \ | 2188 V(Uint32x4FromFloat32x4) \ |
| 2177 V(Int32x4Neg) \ | 2189 V(Int32x4Neg) \ |
| 2178 V(Int16x8Neg) \ | 2190 V(Int16x8Neg) \ |
| 2179 V(Int8x16Neg) | 2191 V(Int8x16Neg) \ |
| 2192 V(Simd128Not) |
| 2180 | 2193 |
| 2181 #define SIMD_BINOP_LIST(V) \ | 2194 #define SIMD_BINOP_LIST(V) \ |
| 2182 V(Float32x4Add) \ | 2195 V(Float32x4Add) \ |
| 2183 V(Float32x4Sub) \ | 2196 V(Float32x4Sub) \ |
| 2184 V(Float32x4Equal) \ | 2197 V(Float32x4Equal) \ |
| 2185 V(Float32x4NotEqual) \ | 2198 V(Float32x4NotEqual) \ |
| 2186 V(Int32x4Add) \ | 2199 V(Int32x4Add) \ |
| 2187 V(Int32x4Sub) \ | 2200 V(Int32x4Sub) \ |
| 2188 V(Int32x4Mul) \ | 2201 V(Int32x4Mul) \ |
| 2189 V(Int32x4Min) \ | 2202 V(Int32x4Min) \ |
| (...skipping 32 matching lines...) |
| 2222 V(Int8x16Max) \ | 2235 V(Int8x16Max) \ |
| 2223 V(Int8x16Equal) \ | 2236 V(Int8x16Equal) \ |
| 2224 V(Int8x16NotEqual) \ | 2237 V(Int8x16NotEqual) \ |
| 2225 V(Int8x16GreaterThan) \ | 2238 V(Int8x16GreaterThan) \ |
| 2226 V(Int8x16GreaterThanOrEqual) \ | 2239 V(Int8x16GreaterThanOrEqual) \ |
| 2227 V(Uint8x16AddSaturate) \ | 2240 V(Uint8x16AddSaturate) \ |
| 2228 V(Uint8x16SubSaturate) \ | 2241 V(Uint8x16SubSaturate) \ |
| 2229 V(Uint8x16Min) \ | 2242 V(Uint8x16Min) \ |
| 2230 V(Uint8x16Max) \ | 2243 V(Uint8x16Max) \ |
| 2231 V(Uint8x16GreaterThan) \ | 2244 V(Uint8x16GreaterThan) \ |
| 2232 V(Uint8x16GreaterThanOrEqual) | 2245 V(Uint8x16GreaterThanOrEqual) \ |
| 2246 V(Simd128And) \ |
| 2247 V(Simd128Or) \ |
| 2248 V(Simd128Xor) |
| 2233 | 2249 |
| 2234 #define SIMD_SHIFT_OP_LIST(V) \ | 2250 #define SIMD_SHIFT_OP_LIST(V) \ |
| 2235 V(Int32x4ShiftLeftByScalar) \ | 2251 V(Int32x4ShiftLeftByScalar) \ |
| 2236 V(Int32x4ShiftRightByScalar) \ | 2252 V(Int32x4ShiftRightByScalar) \ |
| 2237 V(Uint32x4ShiftRightByScalar) \ | 2253 V(Uint32x4ShiftRightByScalar) \ |
| 2238 V(Int16x8ShiftLeftByScalar) \ | 2254 V(Int16x8ShiftLeftByScalar) \ |
| 2239 V(Int16x8ShiftRightByScalar) \ | 2255 V(Int16x8ShiftRightByScalar) \ |
| 2240 V(Uint16x8ShiftRightByScalar) \ | 2256 V(Uint16x8ShiftRightByScalar) \ |
| 2241 V(Int8x16ShiftLeftByScalar) \ | 2257 V(Int8x16ShiftLeftByScalar) \ |
| 2242 V(Int8x16ShiftRightByScalar) \ | 2258 V(Int8x16ShiftRightByScalar) \ |
| (...skipping 34 matching lines...) |
| 2277 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) | 2293 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) |
| 2278 #undef SIMD_VISIT_BINOP | 2294 #undef SIMD_VISIT_BINOP |
| 2279 | 2295 |
| 2280 #define SIMD_VISIT_SHIFT_OP(Name) \ | 2296 #define SIMD_VISIT_SHIFT_OP(Name) \ |
| 2281 void InstructionSelector::Visit##Name(Node* node) { \ | 2297 void InstructionSelector::Visit##Name(Node* node) { \ |
| 2282 VisitRRI(this, kArm##Name, node); \ | 2298 VisitRRI(this, kArm##Name, node); \ |
| 2283 } | 2299 } |
| 2284 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) | 2300 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) |
| 2285 #undef SIMD_VISIT_SHIFT_OP | 2301 #undef SIMD_VISIT_SHIFT_OP |
| 2286 | 2302 |
| 2287 void InstructionSelector::VisitSimd32x4Select(Node* node) { | 2303 #define SIMD_VISIT_SELECT_OP(format) \ |
| 2288 ArmOperandGenerator g(this); | 2304 void InstructionSelector::VisitSimd##format##Select(Node* node) { \ |
| 2289 Emit(kArmSimd32x4Select, g.DefineAsRegister(node), | 2305 VisitRRRR(this, kArmSimd##format##Select, node); \ |
| 2290 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), | 2306 } |
| 2291 g.UseRegister(node->InputAt(2))); | 2307 SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP) |
| 2292 } | 2308 #undef SIMD_VISIT_SELECT_OP |
| 2293 | 2309 |
| 2294 // static | 2310 // static |
| 2295 MachineOperatorBuilder::Flags | 2311 MachineOperatorBuilder::Flags |
| 2296 InstructionSelector::SupportedMachineOperatorFlags() { | 2312 InstructionSelector::SupportedMachineOperatorFlags() { |
| 2297 MachineOperatorBuilder::Flags flags; | 2313 MachineOperatorBuilder::Flags flags; |
| 2298 if (CpuFeatures::IsSupported(SUDIV)) { | 2314 if (CpuFeatures::IsSupported(SUDIV)) { |
| 2299 // The sdiv and udiv instructions correctly return 0 if the divisor is 0, | 2315 // The sdiv and udiv instructions correctly return 0 if the divisor is 0, |
| 2300 // but the fall-back implementation does not. | 2316 // but the fall-back implementation does not. |
| 2301 flags |= MachineOperatorBuilder::kInt32DivIsSafe | | 2317 flags |= MachineOperatorBuilder::kInt32DivIsSafe | |
| 2302 MachineOperatorBuilder::kUint32DivIsSafe; | 2318 MachineOperatorBuilder::kUint32DivIsSafe; |
| (...skipping 21 matching lines...) |
| 2324 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); | 2340 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); |
| 2325 req_aligned[0] = MachineType::Float32(); | 2341 req_aligned[0] = MachineType::Float32(); |
| 2326 req_aligned[1] = MachineType::Float64(); | 2342 req_aligned[1] = MachineType::Float64(); |
| 2327 return MachineOperatorBuilder::AlignmentRequirements:: | 2343 return MachineOperatorBuilder::AlignmentRequirements:: |
| 2328 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); | 2344 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); |
| 2329 } | 2345 } |
| 2330 | 2346 |
| 2331 } // namespace compiler | 2347 } // namespace compiler |
| 2332 } // namespace internal | 2348 } // namespace internal |
| 2333 } // namespace v8 | 2349 } // namespace v8 |
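
For reference, expanding the new SIMD_VISIT_SELECT_OP macro over SIMD_FORMAT_LIST should produce one select visitor per SIMD format, each delegating to the VisitRRRR helper added near the top of this patch. A rough hand-expansion of the 32x4 instance is sketched below; it is illustrative only, not generated preprocessor output, but it matches the hand-written VisitSimd32x4Select that this change removes.

    // Generated by SIMD_VISIT_SELECT_OP(32x4): route the select node
    // through the generic four-register helper.
    void InstructionSelector::VisitSimd32x4Select(Node* node) {
      VisitRRRR(this, kArmSimd32x4Select, node);
    }

    // VisitRRRR then emits the same four-operand form as the old
    // hand-written visitor: the result is defined as a register and the
    // node's three inputs are used as registers.
    //   Emit(kArmSimd32x4Select, g.DefineAsRegister(node),
    //        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
    //        g.UseRegister(node->InputAt(2)));

The 16x8 and 8x16 instances generated from SIMD_FORMAT_LIST differ only in the format suffix of the visitor name and opcode.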