Index: src/compiler/arm/instruction-scheduler-arm.cc
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ccffeee98115568fa6610439c26da6891364fe54
--- /dev/null
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -0,0 +1,126 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+    const Instruction* instr) const {
+  switch (instr->arch_opcode()) {
+    case kArmAdd:
+    case kArmAnd:
+    case kArmBic:
+    case kArmClz:
+    case kArmCmp:
+    case kArmCmn:
+    case kArmTst:
+    case kArmTeq:
+    case kArmOrr:
+    case kArmEor:
+    case kArmSub:
+    case kArmRsb:
+    case kArmMul:
+    case kArmMla:
+    case kArmMls:
+    case kArmSmmul:
+    case kArmSmmla:
+    case kArmUmull:
+    case kArmSdiv:
+    case kArmUdiv:
+    case kArmMov:
+    case kArmMvn:
+    case kArmBfc:
+    case kArmUbfx:
+    case kArmSxtb:
+    case kArmSxth:
+    case kArmSxtab:
+    case kArmSxtah:
+    case kArmUxtb:
+    case kArmUxth:
+    case kArmUxtab:
+    case kArmUxtah:
+    case kArmVcmpF32:
+    case kArmVaddF32:
+    case kArmVsubF32:
+    case kArmVmulF32:
+    case kArmVmlaF32:
+    case kArmVmlsF32:
+    case kArmVdivF32:
+    case kArmVabsF32:
+    case kArmVnegF32:
+    case kArmVsqrtF32:
+    case kArmVcmpF64:
+    case kArmVaddF64:
+    case kArmVsubF64:
+    case kArmVmulF64:
+    case kArmVmlaF64:
+    case kArmVmlsF64:
+    case kArmVdivF64:
+    case kArmVmodF64:
+    case kArmVabsF64:
+    case kArmVnegF64:
+    case kArmVsqrtF64:
+    case kArmVrintmF32:
+    case kArmVrintmF64:
+    case kArmVrintpF32:
+    case kArmVrintpF64:
+    case kArmVrintzF32:
+    case kArmVrintzF64:
+    case kArmVrintaF64:
+    case kArmVrintnF32:
+    case kArmVrintnF64:
+    case kArmVcvtF32F64:
+    case kArmVcvtF64F32:
+    case kArmVcvtF64S32:
+    case kArmVcvtF64U32:
+    case kArmVcvtS32F64:
+    case kArmVcvtU32F64:
+    case kArmVmovLowU32F64:
+    case kArmVmovLowF64U32:
+    case kArmVmovHighU32F64:
+    case kArmVmovHighF64U32:
+    case kArmVmovF64U32U32:
+      return kNoOpcodeFlags;
+
+    case kArmVldrF32:
+    case kArmVldrF64:
+    case kArmLdrb:
+    case kArmLdrsb:
+    case kArmLdrh:
+    case kArmLdrsh:
+    case kArmLdr:
+      return kIsLoadOperation;
+
+    case kArmVstrF32:
+    case kArmVstrF64:
+    case kArmStrb:
+    case kArmStrh:
+    case kArmStr:
+    case kArmPush:
+    case kArmPoke:
+      return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+    COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+      // Already covered in architecture independent code.
+      UNREACHABLE();
+  }
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // TODO(all): Add instruction cost modeling.
+  return 1;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8