| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/arm64/macro-assembler-arm64.h" | 7 #include "src/arm64/macro-assembler-arm64.h" |
| 8 #include "src/compiler/code-generator-impl.h" | 8 #include "src/compiler/code-generator-impl.h" |
| 9 #include "src/compiler/gap-resolver.h" | 9 #include "src/compiler/gap-resolver.h" |
| 10 #include "src/compiler/node-matchers.h" | 10 #include "src/compiler/node-matchers.h" |
| (...skipping 256 matching lines...) |
| 267 break; | 267 break; |
| 268 case kArm64Sar32: | 268 case kArm64Sar32: |
| 269 ASSEMBLE_SHIFT(Asr, 32); | 269 ASSEMBLE_SHIFT(Asr, 32); |
| 270 break; | 270 break; |
| 271 case kArm64Ror: | 271 case kArm64Ror: |
| 272 ASSEMBLE_SHIFT(Ror, 64); | 272 ASSEMBLE_SHIFT(Ror, 64); |
| 273 break; | 273 break; |
| 274 case kArm64Ror32: | 274 case kArm64Ror32: |
| 275 ASSEMBLE_SHIFT(Ror, 32); | 275 ASSEMBLE_SHIFT(Ror, 32); |
| 276 break; | 276 break; |
| | 277 case kArm64Mov32: |
| | 278 __ Mov(i.OutputRegister32(), i.InputRegister32(0)); |
| | 279 break; |
| | 280 case kArm64Sxtw: |
| | 281 __ Sxtw(i.OutputRegister(), i.InputRegister32(0)); |
| | 282 break; |
| 277 case kArm64CallCodeObject: { | 283 case kArm64CallCodeObject: { |
| 278 if (instr->InputAt(0)->IsImmediate()) { | 284 if (instr->InputAt(0)->IsImmediate()) { |
| 279 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); | 285 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); |
| 280 __ Call(code, RelocInfo::CODE_TARGET); | 286 __ Call(code, RelocInfo::CODE_TARGET); |
| 281 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, | 287 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
| 282 Safepoint::kNoLazyDeopt); | 288 Safepoint::kNoLazyDeopt); |
| 283 } else { | 289 } else { |
| 284 Register reg = i.InputRegister(0); | 290 Register reg = i.InputRegister(0); |
| 285 int entry = Code::kHeaderSize - kHeapObjectTag; | 291 int entry = Code::kHeaderSize - kHeapObjectTag; |
| 286 __ Ldr(reg, MemOperand(reg, entry)); | 292 __ Ldr(reg, MemOperand(reg, entry)); |
| (...skipping 89 matching lines...) |
| 376 // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc | 382 // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc |
| 377 FrameScope scope(masm(), StackFrame::MANUAL); | 383 FrameScope scope(masm(), StackFrame::MANUAL); |
| 378 DCHECK(d0.is(i.InputDoubleRegister(0))); | 384 DCHECK(d0.is(i.InputDoubleRegister(0))); |
| 379 DCHECK(d1.is(i.InputDoubleRegister(1))); | 385 DCHECK(d1.is(i.InputDoubleRegister(1))); |
| 380 DCHECK(d0.is(i.OutputDoubleRegister())); | 386 DCHECK(d0.is(i.OutputDoubleRegister())); |
| 381 // TODO(dcarney): make sure this saves all relevant registers. | 387 // TODO(dcarney): make sure this saves all relevant registers. |
| 382 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), | 388 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
| 383 0, 2); | 389 0, 2); |
| 384 break; | 390 break; |
| 385 } | 391 } |
| 386 case kArm64Int32ToInt64: | |
| 387 __ Sxtw(i.OutputRegister(), i.InputRegister(0)); | |
| 388 break; | |
| 389 case kArm64Int64ToInt32: | |
| 390 if (!i.OutputRegister().is(i.InputRegister(0))) { | |
| 391 __ Mov(i.OutputRegister(), i.InputRegister(0)); | |
| 392 } | |
| 393 break; | |
| 394 case kArm64Float64ToInt32: | 392 case kArm64Float64ToInt32: |
| 395 __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0)); | 393 __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0)); |
| 396 break; | 394 break; |
| 397 case kArm64Float64ToUint32: | 395 case kArm64Float64ToUint32: |
| 398 __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0)); | 396 __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0)); |
| 399 break; | 397 break; |
| 400 case kArm64Int32ToFloat64: | 398 case kArm64Int32ToFloat64: |
| 401 __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); | 399 __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); |
| 402 break; | 400 break; |
| 403 case kArm64Uint32ToFloat64: | 401 case kArm64Uint32ToFloat64: |
| (...skipping 430 matching lines...) |
| 834 } | 832 } |
| 835 | 833 |
| 836 | 834 |
| 837 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); } | 835 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); } |
| 838 | 836 |
| 839 #undef __ | 837 #undef __ |
| 840 | 838 |
| 841 } // namespace compiler | 839 } // namespace compiler |
| 842 } // namespace internal | 840 } // namespace internal |
| 843 } // namespace v8 | 841 } // namespace v8 |
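For context, a minimal stand-alone sketch of the dispatch pattern this change moves to (all names below are hypothetical stand-ins, not V8's actual CodeGenerator/InstructionCodes API): the removed kArm64Int32ToInt64 / kArm64Int64ToInt32 cases are replaced by the kArm64Sxtw and kArm64Mov32 cases added above. On AArch64, writing a W register clears the upper 32 bits of the corresponding X register, so a plain 32-bit move covers int64 -> int32 truncation, while SXTW sign-extends a word for int32 -> int64.

// Sketch only: emits textual pseudo-assembly instead of machine code.
#include <cstdio>

enum class Opcode { kMov32, kSxtw };

void AssembleInstruction(Opcode op, int dst, int src) {
  switch (op) {
    case Opcode::kMov32:
      // 32-bit register move; the write to wN also zeroes the upper half
      // of xN, so this doubles as int64 -> int32 truncation.
      std::printf("mov w%d, w%d\n", dst, src);
      break;
    case Opcode::kSxtw:
      // Sign-extend the low 32 bits into the full 64-bit register
      // (int32 -> int64 conversion).
      std::printf("sxtw x%d, w%d\n", dst, src);
      break;
  }
}

int main() {
  AssembleInstruction(Opcode::kMov32, 0, 1);  // mov w0, w1
  AssembleInstruction(Opcode::kSxtw, 2, 3);   // sxtw x2, w3
  return 0;
}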