// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

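  // Decodes the instruction's addressing mode and converts the memory inputs
  // starting at *first_index into a MemOperand, advancing *first_index past
  // the consumed inputs: kMode_MRI is base register plus immediate offset,
  // kMode_MRR is base register plus sign-extended 32-bit index register.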
  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    ASSERT(op != NULL);
    ASSERT(!op->IsRegister());
    ASSERT(!op->IsDoubleRegister());
    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};


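// Assembles a shift of the given register width, using the register form when
// the shift amount is a register operand and the immediate form otherwise.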
#define ASSEMBLE_SHIFT(asm_instr, width) \
  do { \
    if (instr->InputAt(1)->IsRegister()) { \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
                   i.InputRegister##width(1)); \
    } else { \
      int64_t imm = i.InputOperand##width(1).immediate().value(); \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    } \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      break;
    }
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
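      // ARM64 has no integer remainder instruction; compute
      // remainder = dividend - (dividend / divisor) * divisor via Sdiv + Msub.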
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Xor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Xor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64CallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      } else {
        Register reg = i.InputRegister(0);
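        // The input is a tagged Code object; skip the header (minus the
        // heap-object tag) to reach the address of its first instruction.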
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Add(reg, reg, entry);
        __ Call(reg);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      }
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      // Meaningless instruction for ICs to overwrite.
      AddNopForSmiCodeInlining();
      break;
    }
    case kArm64CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kArm64CallAddress: {
      DirectCEntryStub stub(isolate());
      stub.GenerateCall(masm(), i.InputRegister(0));
      break;
    }
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Drop: {
      int words = MiscField::decode(instr->opcode());
      __ Drop(words);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      ASSERT(d0.is(i.InputDoubleRegister(0)));
      ASSERT(d1.is(i.InputDoubleRegister(1)));
      ASSERT(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Int32ToInt64:
      __ Sxtw(i.OutputRegister(), i.InputRegister(0));
      break;
    case kArm64Int64ToInt32:
      if (!i.OutputRegister().is(i.InputRegister(0))) {
        __ Mov(i.OutputRegister(), i.InputRegister(0));
      }
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64LoadWord8:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord8:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord16:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord16:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LoadWord32:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StoreWord32:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64LoadWord64:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64StoreWord64:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Float64Load:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64Float64Store:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
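      // RecordWrite clobbers lr via its call. When the C stack pointer is in
      // use it must stay 16-byte aligned, so lr is saved together with a
      // scratch register to keep the push a pair.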
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
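  // For floating-point comparisons an unordered (NaN) result sets the V flag,
  // so the kUnordered* cases branch on vs first and then fall through to the
  // ordered condition.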
  switch (condition) {
    case kUnorderedEqual:
      __ B(vs, flabel);
    // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = nv;
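  // As in AssembleArchBranch, the kUnordered* cases test the V flag first: if
  // the comparison was unordered the fixed 0 or 1 result is materialized
  // directly, otherwise control falls through to the ordered condition code.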
  switch (condition) {
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
  }
  __ Bind(&check);
  __ Cset(reg, cc);
  __ Bind(&done);
}


// TODO(dcarney): increase stack slots in frame once before first use.
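// The C stack pointer must stay 16-byte aligned, so pad the slot count to an
// even number before adjusting csp.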
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
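  // C entry frames run on the real C stack pointer (csp); JS function and
  // stub frames run on the JS stack pointer (jssp).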
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }

  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      Constant src = g.ToConstant(source);
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (destination->IsDoubleRegister()) {
      FPRegister result = g.ToDoubleRegister(destination);
      __ Fmov(result, g.ToDouble(constant_source));
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Fmov(temp, g.ToDouble(constant_source));
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
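    // Integer and double stack slots are both 64 bits wide, so a pair of X
    // scratch registers can swap either kind.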
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }

#undef __

#if DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
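// AddNopForSmiCodeInlining emits a single 64-bit movz to xzr, so the range
// must be exactly one instruction (4 bytes) wide and decode as that movz.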
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 4 != end_pc) {
    return false;
  }
  Address instr_address = code->instruction_start() + start_pc;

  v8::internal::Instruction* instr =
      reinterpret_cast<v8::internal::Instruction*>(instr_address);
  return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
}

#endif  // DEBUG

}  // namespace compiler
}  // namespace internal
}  // namespace v8