// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(turbofan): Cleanup these hacks.
enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };


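// A constant to be materialized as a 64-bit immediate: a hand-rolled tagged
// union in which exactly one of the payload fields below is valid, as
// selected by |type| (one of the hacks the TODO above refers to).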
struct Immediate64 {
  uint64_t value;
  Handle<Object> handle;
  ExternalReference reference;
  Immediate64Type type;
};


enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };


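// Wrapper for a decoded instruction operand: a general register, a double
// register, or a memory operand, discriminated by |type|. Typical use
// (a sketch, mirroring the binop cases below):
//
//   RegisterOrOperand input = i.InputRegisterOrOperand(0);
//   if (input.type == kRegister) {
//     __ addl(input.reg, i.InputImmediate(1));
//   } else {
//     __ addl(input.operand, i.InputImmediate(1));
//   }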
struct RegisterOrOperand {
  RegisterOrOperand() : operand(no_reg, 0) {}
  Register reg;
  DoubleRegister double_reg;
  Operand operand;
  RegisterOrOperandType type;
};


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  RegisterOrOperand InputRegisterOrOperand(int index) {
    return ToRegisterOrOperand(instr_->InputAt(index));
  }

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  RegisterOrOperand OutputRegisterOrOperand() {
    return ToRegisterOrOperand(instr_->Output());
  }

  Immediate64 InputImmediate64(int index) {
    return ToImmediate64(instr_->InputAt(index));
  }

  Immediate64 ToImmediate64(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    Immediate64 immediate;
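    // Recognizable garbage; every reachable arm of the switch below either
    // overwrites |value| or retags the union before returning.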
    immediate.value = 0xbeefdeaddeefbeed;
    immediate.type = kImm64Value;
    switch (constant.type()) {
      case Constant::kInt32:
      case Constant::kInt64:
        immediate.value = constant.ToInt64();
        return immediate;
      case Constant::kFloat64:
        immediate.type = kImm64Handle;
        immediate.handle =
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
        return immediate;
      case Constant::kExternalReference:
        immediate.type = kImm64Reference;
        immediate.reference = constant.ToExternalReference();
        return immediate;
      case Constant::kHeapObject:
        immediate.type = kImm64Handle;
        immediate.handle = constant.ToHeapObject();
        return immediate;
    }
    UNREACHABLE();
    return immediate;
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kInt64:
      case Constant::kFloat64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
    ASSERT_EQ(kOperand, result.type);
    return result.operand;
  }

  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op,
                                        int extra = 0) {
    RegisterOrOperand result;
    if (op->IsRegister()) {
      ASSERT(extra == 0);
      result.type = kRegister;
      result.reg = ToRegister(op);
      return result;
    } else if (op->IsDoubleRegister()) {
      ASSERT(extra == 0);
      result.type = kDoubleRegister;
      result.double_reg = ToDoubleRegister(op);
      return result;
    }

    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());

    result.type = kOperand;
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    result.operand =
        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
    return result;
  }

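  // Decodes the instruction's memory operand starting at input *first_input,
  // advancing *first_input past the inputs consumed. Only two addressing
  // modes are handled so far: base + index (scale 1, zero displacement for
  // now, see the TODO below) and base + 32-bit displacement.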
  Operand MemoryOperand(int* first_input) {
    const int offset = *first_input;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_MR1I: {
        *first_input += 2;
        Register index = InputRegister(offset + 1);
        return Operand(InputRegister(offset + 0), index, times_1,
                       0);  // TODO(dcarney): K != 0
      }
      case kMode_MRI:
        *first_input += 2;
        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
      default:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


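// Emits the two-operand x64 form of a binary instruction, choosing the
// register/memory/immediate variant from the instruction's inputs. For
// example (a sketch), kX64Add32 with inputs {rax, #1} expands to roughly:
//   __ addl(rax, Immediate(1));
// while {rax, [rbp - 8]} expands to:
//   __ addl(rax, Operand(rbp, -8));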
#define ASSEMBLE_BINOP(asm_instr)                            \
  do {                                                       \
    if (HasImmediateInput(instr, 1)) {                       \
      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
      if (input.type == kRegister) {                         \
        __ asm_instr(input.reg, i.InputImmediate(1));        \
      } else {                                               \
        __ asm_instr(input.operand, i.InputImmediate(1));    \
      }                                                      \
    } else {                                                 \
      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
      if (input.type == kRegister) {                         \
        __ asm_instr(i.InputRegister(0), input.reg);         \
      } else {                                               \
        __ asm_instr(i.InputRegister(0), input.operand);     \
      }                                                      \
    }                                                        \
  } while (0)


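// Emits a shift whose count is either an immediate, truncated to |width|
// bits (5 for 32-bit shifts, 6 for 64-bit shifts, matching the hardware
// masking of the count), or implicitly held in cl via the assembler's
// *_cl variant.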
#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
  do {                                                                   \
    if (HasImmediateInput(instr, 1)) {                                   \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
    } else {                                                             \
      __ asm_instr##_cl(i.OutputRegister());                             \
    }                                                                    \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

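  // The opcode word is bit-packed: ArchOpcodeField selects the case below,
  // while AddressingModeField (memory operands) and MiscField (deopt ids,
  // pop counts, call flags) carry per-instruction payload.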
  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg);
        } else {
          __ imull(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Imul:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg);
        } else {
          __ imulq(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notq(output.reg);
      } else {
        __ notq(output.operand);
      }
      break;
    }
    case kX64Not32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notl(output.reg);
      } else {
        __ notl(output.operand);
      }
      break;
    }
    case kX64Neg: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negq(output.reg);
      } else {
        __ negq(output.operand);
      }
      break;
    }
    case kX64Neg32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negl(output.reg);
      } else {
        __ negl(output.operand);
      }
      break;
    }
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Push: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ pushq(input.reg);
      } else {
        __ pushq(input.operand);
      }
      break;
    }
    case kX64PushI:
      __ pushq(i.InputImmediate(0));
      break;
    case kX64CallCodeObject: {
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      AddNopForSmiCodeInlining();
      break;
    }
    case kX64CallAddress:
      if (HasImmediateInput(instr, 0)) {
        Immediate64 imm = i.InputImmediate64(0);
        ASSERT_EQ(kImm64Value, imm.type);
        __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
      } else {
        __ call(i.InputRegister(0));
      }
      break;
    case kPopStack: {
      int words = MiscField::decode(instr->opcode());
      __ addq(rsp, Immediate(kPointerSize * words));
      break;
    }
    case kX64CallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      break;
    }
    case kSSEFloat64Cmp: {
      RegisterOrOperand input = i.InputRegisterOrOperand(1);
      if (input.type == kDoubleRegister) {
        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
      } else {
        __ ucomisd(i.InputDoubleRegister(0), input.operand);
      }
      break;
    }
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
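      // (fprem reduces the remainder only partially per iteration and sets
      // the x87 C2 status bit while more work remains; after the status word
      // is copied into EFLAGS below, C2 lands in the parity flag, which is
      // why the loop spins on parity_even.)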
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following two instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kX64Int32ToInt64:
      __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      break;
    case kX64Int64ToInt32:
      __ Move(i.OutputRegister(), i.InputRegister(0));
      break;
    case kSSEFloat64ToInt32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2si(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2si(i.OutputRegister(), input.operand);
      }
      break;
    }
    case kSSEInt32ToFloat64: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
      }
      break;
    }
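    // Memory access patterns below: loads decode their memory operand
    // starting at input 0; stores decode the memory operand first and then
    // take the value to store (a register, or an immediate for the *I
    // variants) from the next input index.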
    case kSSELoad:
      __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kSSEStore: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movsd(operand, i.InputDoubleRegister(index));
      break;
    }
    case kX64LoadWord8:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord8: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movb(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord8I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movb(operand, Immediate(i.InputInt8(index)));
      break;
    }
    case kX64LoadWord16:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord16: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movw(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord16I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movw(operand, Immediate(i.InputInt16(index)));
      break;
    }
    case kX64LoadWord32:
      __ movl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord32: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movl(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord32I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movl(operand, i.InputImmediate(index));
      break;
    }
    case kX64LoadWord64:
      __ movq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64StoreWord64: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movq(operand, i.InputRegister(index));
      break;
    }
    case kX64StoreWord64I: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      __ movq(operand, i.InputImmediate(index));
      break;
    }
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
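      // Store value at object + index, then record the write so the GC's
      // write barrier sees the new pointer. Note that the index register is
      // clobbered: after the store it is reused to hold the address of the
      // slot that was written.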
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
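  // The kUnordered* conditions come from floating point comparisons where an
  // operand was NaN: ucomisd signals that by setting the parity flag, so
  // each unordered case first branches on parity_even and then falls through
  // to its ordered counterpart.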
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = no_condition;
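  // For the unordered (NaN) cases the result is fully determined up front:
  // if the parity flag is set, the 0 or 1 is written immediately and control
  // jumps to |done|; otherwise it falls through to the ordered setcc path
  // below.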
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
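  // Three frame flavors: C entry frames (kCallAddress) push rbp and save
  // callee-saved registers by hand, JS function frames use the standard JS
  // prologue (and may patch the receiver below), and everything else gets a
  // stub frame.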
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
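    // Constants are first materialized into a register: directly into the
    // destination if it is a register, otherwise into kScratchRegister,
    // which is then spilled to the destination slot. Double constants travel
    // as raw 64-bit patterns through kScratchRegister.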
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      Immediate64 imm = g.ToImmediate64(constant_source);
      switch (imm.type) {
        case kImm64Value:
          __ Set(dst, imm.value);
          break;
        case kImm64Reference:
          __ Move(dst, imm.reference);
          break;
        case kImm64Handle:
          __ Move(dst, imm.handle);
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else {
      __ movq(kScratchRegister,
              BitCast<uint64_t, double>(g.ToDouble(constant_source)));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        ASSERT(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }

#undef __

#ifdef DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  if (start_pc + 1 != end_pc) {
    return false;
  }
  return *(code->instruction_start() + start_pc) ==
         v8::internal::Assembler::kNopByte;
}

#endif

}  // namespace compiler
}  // namespace internal
}  // namespace v8