| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 16 matching lines...) |
| 27 : OperandGenerator(selector) {} | 27 : OperandGenerator(selector) {} |
| 28 | 28 |
| 29 InstructionOperand* UseOperand(Node* node, ImmediateMode mode) { | 29 InstructionOperand* UseOperand(Node* node, ImmediateMode mode) { |
| 30 if (CanBeImmediate(node, mode)) { | 30 if (CanBeImmediate(node, mode)) { |
| 31 return UseImmediate(node); | 31 return UseImmediate(node); |
| 32 } | 32 } |
| 33 return UseRegister(node); | 33 return UseRegister(node); |
| 34 } | 34 } |
| 35 | 35 |
| 36 bool CanBeImmediate(Node* node, ImmediateMode mode) { | 36 bool CanBeImmediate(Node* node, ImmediateMode mode) { |
| 37 int64_t value; | 37 Int32Matcher m(node); |
| 38 switch (node->opcode()) { | 38 if (!m.HasValue()) return false; |
| 39 // TODO(turbofan): SMI number constants as immediates. | 39 int64_t value = m.Value(); |
| 40 case IrOpcode::kInt32Constant: | |
| 41 value = ValueOf<int32_t>(node->op()); | |
| 42 break; | |
| 43 default: | |
| 44 return false; | |
| 45 } | |
| 46 unsigned ignored; | 40 unsigned ignored; |
| 47 switch (mode) { | 41 switch (mode) { |
| 48 case kLogical32Imm: | 42 case kLogical32Imm: |
| 49 // TODO(dcarney): some unencodable values can be handled by | 43 // TODO(dcarney): some unencodable values can be handled by |
| 50 // switching instructions. | 44 // switching instructions. |
| 51 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32, | 45 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32, |
| 52 &ignored, &ignored, &ignored); | 46 &ignored, &ignored, &ignored); |
| 53 case kLogical64Imm: | 47 case kLogical64Imm: |
| 54 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64, | 48 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64, |
| 55 &ignored, &ignored, &ignored); | 49 &ignored, &ignored, &ignored); |
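The new CanBeImmediate collapses the old opcode switch into an Int32Matcher and defers the logical modes to Assembler::IsImmLogical. For context, here is a minimal self-contained sketch of the property that check tests, assuming the usual definition of an ARM64 logical ("bitmask") immediate: a run of contiguous ones, rotated, and replicated across the register in power-of-two sized elements. This is an illustration only, not V8's IsImmLogical:

```cpp
#include <cstdint>

// Illustration only: all-zeros and all-ones are never encodable as
// ARM64 bitmask immediates; everything else must be a replicated,
// rotated run of contiguous ones.
static bool IsRunOfOnes(uint64_t x) {
  return x != 0 && (x & (x + 1)) == 0;  // e.g. 0b000111
}

static uint64_t RotateRight(uint64_t x, unsigned r, unsigned size) {
  uint64_t mask = (size == 64) ? ~0ull : (1ull << size) - 1;
  x &= mask;
  return r == 0 ? x : ((x >> r) | (x << (size - r))) & mask;
}

bool IsBitmaskImmediate(uint64_t value, unsigned width) {
  if (width == 32) {
    value &= 0xFFFFFFFFull;
    value |= value << 32;  // a 32-bit pattern is checked as its replication
  }
  if (value == 0 || value == ~0ull) return false;
  // Find the smallest element size whose replication reproduces value.
  unsigned size = 64;
  while (size > 2) {
    unsigned half = size / 2;
    uint64_t mask = (1ull << half) - 1;
    if ((value & mask) != ((value >> half) & mask)) break;
    size = half;
  }
  // Some rotation of the element must be a contiguous run of ones.
  uint64_t elem = RotateRight(value, 0, size);
  for (unsigned r = 0; r < size; ++r) {
    if (IsRunOfOnes(RotateRight(elem, r, size))) return true;
  }
  return false;
}
```

For example, IsBitmaskImmediate(0x00FF00FF00FF00FFull, 64) is true (size-16 elements), while IsBitmaskImmediate(5, 64) is false; constants failing the check are the ones UseOperand falls back to UseRegister for.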
| (...skipping 196 matching lines...) |
| 252 void InstructionSelector::VisitWord32Or(Node* node) { | 246 void InstructionSelector::VisitWord32Or(Node* node) { |
| 253 VisitBinop(this, node, kArm64Or32, kLogical32Imm); | 247 VisitBinop(this, node, kArm64Or32, kLogical32Imm); |
| 254 } | 248 } |
| 255 | 249 |
| 256 | 250 |
| 257 void InstructionSelector::VisitWord64Or(Node* node) { | 251 void InstructionSelector::VisitWord64Or(Node* node) { |
| 258 VisitBinop(this, node, kArm64Or, kLogical64Imm); | 252 VisitBinop(this, node, kArm64Or, kLogical64Imm); |
| 259 } | 253 } |
| 260 | 254 |
| 261 | 255 |
| 262 template <typename T> | 256 void InstructionSelector::VisitWord32Xor(Node* node) { |
| 263 static void VisitXor(InstructionSelector* selector, Node* node, | 257 Arm64OperandGenerator g(this); |
| 264 ArchOpcode xor_opcode, ArchOpcode not_opcode) { | 258 Int32BinopMatcher m(node); |
| 265 Arm64OperandGenerator g(selector); | |
| 266 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); | |
| 267 if (m.right().Is(-1)) { | 259 if (m.right().Is(-1)) { |
| 268 selector->Emit(not_opcode, g.DefineAsRegister(node), | 260 Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node())); |
| 269 g.UseRegister(m.left().node())); | |
| 270 } else { | 261 } else { |
| 271 VisitBinop(selector, node, xor_opcode, kLogical32Imm); | 262 VisitBinop(this, node, kArm64Xor32, kLogical32Imm); |
| 272 } | 263 } |
| 273 } | 264 } |
| 274 | 265 |
| 275 | 266 |
| 276 void InstructionSelector::VisitWord32Xor(Node* node) { | 267 void InstructionSelector::VisitWord64Xor(Node* node) { |
| 277 VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32); | 268 Arm64OperandGenerator g(this); |
| | 269 Int64BinopMatcher m(node); |
| | 270 if (m.right().Is(-1)) { |
| | 271 Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); |
| | 272 } else { |
| | 273 VisitBinop(this, node, kArm64Xor, kLogical32Imm); |
| | 274 } |
| 278 } | 275 } |
| 279 | 276 |
| 280 | 277 |
| 281 void InstructionSelector::VisitWord64Xor(Node* node) { | |
| 282 VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not); | |
| 283 } | |
| 284 | |
| 285 | |
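De-templating VisitXor into the two explicit bodies above preserves the same strength reduction: xor with an all-ones constant becomes a single MVN (kArm64Not32 / kArm64Not) instead of an EOR plus a materialized constant. A toy sketch of the pattern being matched, using a hypothetical simplified node type standing in for V8's Node:

```cpp
#include <cassert>
#include <cstdint>

enum ArchOpcode { kXor32, kNot32 };  // stand-ins for kArm64Xor32/kArm64Not32

struct ConstNode {   // hypothetical, simplified; not V8's Node
  bool is_constant;
  int64_t value;
};

// Mirrors the diff's VisitWord32Xor logic: "x ^ -1" is bitwise NOT,
// which ARM64 encodes in one instruction (MVN, an alias of ORN with wzr).
ArchOpcode SelectXor32(const ConstNode& right) {
  if (right.is_constant && right.value == -1) return kNot32;
  return kXor32;
}

int main() {
  assert(SelectXor32({true, -1}) == kNot32);  // x ^ -1  ==>  mvn
  assert(SelectXor32({true, 7}) == kXor32);   // other constants: eor
  assert(SelectXor32({false, 0}) == kXor32);  // register operand: eor
  return 0;
}
```

Note that both before and after this change the 64-bit path passes kLogical32Imm to VisitBinop (the old template hard-coded it for both widths), while the neighboring VisitWord64Or uses kLogical64Imm.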
| 286 void InstructionSelector::VisitWord32Shl(Node* node) { | 278 void InstructionSelector::VisitWord32Shl(Node* node) { |
| 287 VisitRRO(this, kArm64Shl32, node, kShift32Imm); | 279 VisitRRO(this, kArm64Shl32, node, kShift32Imm); |
| 288 } | 280 } |
| 289 | 281 |
| 290 | 282 |
| 291 void InstructionSelector::VisitWord64Shl(Node* node) { | 283 void InstructionSelector::VisitWord64Shl(Node* node) { |
| 292 VisitRRO(this, kArm64Shl, node, kShift64Imm); | 284 VisitRRO(this, kArm64Shl, node, kShift64Imm); |
| 293 } | 285 } |
| 294 | 286 |
| 295 | 287 |
| (...skipping 30 matching lines...) |
| 326 void InstructionSelector::VisitInt32Add(Node* node) { | 318 void InstructionSelector::VisitInt32Add(Node* node) { |
| 327 VisitBinop(this, node, kArm64Add32, kArithimeticImm); | 319 VisitBinop(this, node, kArm64Add32, kArithimeticImm); |
| 328 } | 320 } |
| 329 | 321 |
| 330 | 322 |
| 331 void InstructionSelector::VisitInt64Add(Node* node) { | 323 void InstructionSelector::VisitInt64Add(Node* node) { |
| 332 VisitBinop(this, node, kArm64Add, kArithimeticImm); | 324 VisitBinop(this, node, kArm64Add, kArithimeticImm); |
| 333 } | 325 } |
| 334 | 326 |
| 335 | 327 |
| 336 template <typename T> | 328 void InstructionSelector::VisitInt32Sub(Node* node) { |
| 337 static void VisitSub(InstructionSelector* selector, Node* node, | 329 Arm64OperandGenerator g(this); |
| 338 ArchOpcode sub_opcode, ArchOpcode neg_opcode) { | 330 Int32BinopMatcher m(node); |
| 339 Arm64OperandGenerator g(selector); | |
| 340 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); | |
| 341 if (m.left().Is(0)) { | 331 if (m.left().Is(0)) { |
| 342 selector->Emit(neg_opcode, g.DefineAsRegister(node), | 332 Emit(kArm64Neg32, g.DefineAsRegister(node), |
| 343 g.UseRegister(m.right().node())); | 333 g.UseRegister(m.right().node())); |
| 344 } else { | 334 } else { |
| 345 VisitBinop(selector, node, sub_opcode, kArithimeticImm); | 335 VisitBinop(this, node, kArm64Sub32, kArithimeticImm); |
| 346 } | 336 } |
| 347 } | 337 } |
| 348 | 338 |
| 349 | 339 |
| 350 void InstructionSelector::VisitInt32Sub(Node* node) { | 340 void InstructionSelector::VisitInt64Sub(Node* node) { |
| 351 VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32); | 341 Arm64OperandGenerator g(this); |
| | 342 Int64BinopMatcher m(node); |
| | 343 if (m.left().Is(0)) { |
| | 344 Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); |
| | 345 } else { |
| | 346 VisitBinop(this, node, kArm64Sub, kArithimeticImm); |
| | 347 } |
| 352 } | 348 } |
| 353 | 349 |
| 354 | 350 |
| 355 void InstructionSelector::VisitInt64Sub(Node* node) { | |
| 356 VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg); | |
| 357 } | |
| 358 | |
| 359 | |
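The VisitSub de-templating follows the same shape as the xor case: subtraction from a zero constant is emitted as kArm64Neg32 / kArm64Neg, avoiding a register for the zero. The identity being exploited is that NEG wd, wm is an ARM64 alias of SUB wd, wzr, wm, and in two's complement 0 - x equals ~x + 1. A tiny self-contained sanity check:

```cpp
#include <cassert>
#include <cstdint>

// 0 - x and ~x + 1 agree for all 32-bit x in two's complement, so
// "0 - x" can be selected as a single NEG without materializing the
// zero constant (NEG is an alias of SUB from the zero register).
int main() {
  const uint32_t samples[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t x : samples) {
    assert(0u - x == ~x + 1u);
  }
  return 0;
}
```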
| 360 void InstructionSelector::VisitInt32Mul(Node* node) { | 351 void InstructionSelector::VisitInt32Mul(Node* node) { |
| 361 VisitRRR(this, kArm64Mul32, node); | 352 VisitRRR(this, kArm64Mul32, node); |
| 362 } | 353 } |
| 363 | 354 |
| 364 | 355 |
| 365 void InstructionSelector::VisitInt64Mul(Node* node) { | 356 void InstructionSelector::VisitInt64Mul(Node* node) { |
| 366 VisitRRR(this, kArm64Mul, node); | 357 VisitRRR(this, kArm64Mul, node); |
| 367 } | 358 } |
| 368 | 359 |
| 369 | 360 |
| (...skipping 296 matching lines...) |
| 666 // Caller clean up of stack for C-style calls. | 657 // Caller clean up of stack for C-style calls. |
| 667 if (is_c_frame && aligned_push_count > 0) { | 658 if (is_c_frame && aligned_push_count > 0) { |
| 668 DCHECK(deoptimization == NULL && continuation == NULL); | 659 DCHECK(deoptimization == NULL && continuation == NULL); |
| 669 Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL); | 660 Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL); |
| 670 } | 661 } |
| 671 } | 662 } |
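The C-call epilogue above drops aligned_push_count stack slots via kArchDrop. Assuming the elided argument-push code rounds an odd slot count up by one (stack slots are 8 bytes and the ARM64 ABI keeps SP 16-byte aligned), the count would be computed along these lines. This is a hypothetical sketch, not the skipped V8 code:

```cpp
// Hypothetical: with 8-byte slots and a 16-byte aligned SP, an odd
// number of pushed arguments needs one slot of padding.
static int AlignedPushCount(int pushed_count) {
  return pushed_count + (pushed_count & 1);  // round up to even
}
```

Passing the same rounded count to kArchDrop means the caller pops exactly what it pushed, padding included.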
| 672 | 663 |
| 673 } // namespace compiler | 664 } // namespace compiler |
| 674 } // namespace internal | 665 } // namespace internal |
| 675 } // namespace v8 | 666 } // namespace v8 |