OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "src/compiler/code-generator.h" |
| 6 #include "src/compiler/code-generator-impl.h" |
| 7 #include "src/compiler/gap-resolver.h" |
| 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties-inl.h" |
| 10 #include "src/mips/macro-assembler-mips.h" |
| 11 #include "src/scopes.h" |
| 12 |
| 13 namespace v8 { |
| 14 namespace internal { |
| 15 namespace compiler { |
| 16 |
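| // The "__" shorthand lets the emission code below read like assembly:
| // "__ mov(...)" expands to "masm()->mov(...)".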
| 17 #define __ masm()-> |
| 18 |
| 19 |
| 20 // TODO(plind): Possibly avoid using these lithium names. |
| 21 #define kScratchReg kLithiumScratchReg |
| 22 #define kCompareReg kLithiumScratchReg2 |
| 23 #define kScratchDoubleReg kLithiumScratchDouble |
| 24 |
| 25 |
| 26 // TODO(plind): consider renaming these macros. |
| 27 #define TRACE_MSG(msg) \ |
| 28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ |
| 29 __LINE__) |
| 30 |
| 31 #define TRACE_UNIMPL() \ |
| 32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ |
| 33 __LINE__) |
| 34 |
| 35 |
| 36 // Adds Mips-specific methods to convert InstructionOperands. |
| 37 class MipsOperandConverter FINAL : public InstructionOperandConverter { |
| 38 public: |
| 39 MipsOperandConverter(CodeGenerator* gen, Instruction* instr) |
| 40 : InstructionOperandConverter(gen, instr) {} |
| 41 |
| 42 FloatRegister OutputSingleRegister(int index = 0) { |
| 43 return ToSingleRegister(instr_->OutputAt(index)); |
| 44 } |
| 45 |
| 46 FloatRegister InputSingleRegister(int index) { |
| 47 return ToSingleRegister(instr_->InputAt(index)); |
| 48 } |
| 49 |
| 50 FloatRegister ToSingleRegister(InstructionOperand* op) { |
| 51 // The single (float) and double register namespaces are the same on MIPS;
| 52 // both are typedefs of FPURegister.
| 53 return ToDoubleRegister(op); |
| 54 } |
| 55 |
| 56 Operand InputImmediate(int index) { |
| 57 Constant constant = ToConstant(instr_->InputAt(index)); |
| 58 switch (constant.type()) { |
| 59 case Constant::kInt32: |
| 60 return Operand(constant.ToInt32()); |
| 61 case Constant::kFloat32: |
| 62 return Operand( |
| 63 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED)); |
| 64 case Constant::kFloat64: |
| 65 return Operand( |
| 66 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); |
| 67 case Constant::kInt64: |
| 68 case Constant::kExternalReference: |
| 69 case Constant::kHeapObject: |
| 70 // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
| 71 // Perhaps ARM does not, because of its constant pool.
| 72 break; |
| 73 } |
| 74 UNREACHABLE(); |
| 75 return Operand(zero_reg); |
| 76 } |
| 77 |
| 78 Operand InputOperand(int index) { |
| 79 InstructionOperand* op = instr_->InputAt(index); |
| 80 if (op->IsRegister()) { |
| 81 return Operand(ToRegister(op)); |
| 82 } |
| 83 return InputImmediate(index); |
| 84 } |
| 85 |
| 86 MemOperand MemoryOperand(int* first_index) { |
| 87 const int index = *first_index; |
| 88 switch (AddressingModeField::decode(instr_->opcode())) { |
| 89 case kMode_None: |
| 90 break; |
| 91 case kMode_MRI: |
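| // [base register + immediate offset] mode; consumes two instruction inputs.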
| 92 *first_index += 2; |
| 93 return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); |
| 94 case kMode_MRR: |
| 95 // TODO(plind): r6 address mode, to be implemented ... |
| 96 UNREACHABLE(); |
| 97 } |
| 98 UNREACHABLE(); |
| 99 return MemOperand(no_reg); |
| 100 } |
| 101 |
| 102 MemOperand MemoryOperand() { |
| 103 int index = 0; |
| 104 return MemoryOperand(&index); |
| 105 } |
| 106 |
| 107 MemOperand ToMemOperand(InstructionOperand* op) const { |
| 108 DCHECK(op != NULL); |
| 109 DCHECK(!op->IsRegister()); |
| 110 DCHECK(!op->IsDoubleRegister()); |
| 111 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| 112 // The linkage computes where all spill slots are located. |
| 113 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); |
| 114 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); |
| 115 } |
| 116 }; |
| 117 |
| 118 |
| 119 static inline bool HasRegisterInput(Instruction* instr, int index) { |
| 120 return instr->InputAt(index)->IsRegister(); |
| 121 } |
| 122 |
| 123 |
| 124 // Assembles an instruction after register allocation, producing machine code. |
| 125 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { |
| 126 MipsOperandConverter i(this, instr); |
| 127 InstructionCode opcode = instr->opcode(); |
| 128 |
| 129 switch (ArchOpcodeField::decode(opcode)) { |
| 130 case kArchCallCodeObject: { |
| 131 EnsureSpaceForLazyDeopt(); |
| 132 if (instr->InputAt(0)->IsImmediate()) { |
| 133 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), |
| 134 RelocInfo::CODE_TARGET); |
| 135 } else { |
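| // The input is a tagged pointer to a Code object; skipping the header
| // and removing the tag yields the address of the first instruction.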
| 136 __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); |
| 137 __ Call(at); |
| 138 } |
| 139 AddSafepointAndDeopt(instr); |
| 140 break; |
| 141 } |
| 142 case kArchCallJSFunction: { |
| 143 EnsureSpaceForLazyDeopt(); |
| 144 Register func = i.InputRegister(0); |
| 145 if (FLAG_debug_code) { |
| 146 // Check the function's context matches the context argument. |
| 147 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); |
| 148 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg)); |
| 149 } |
| 150 |
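| // The code entry field holds an untagged address, so it can be called
| // directly without the header adjustment used above.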
| 151 __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); |
| 152 __ Call(at); |
| 153 AddSafepointAndDeopt(instr); |
| 154 break; |
| 155 } |
| 156 case kArchJmp: |
| 157 __ Branch(code_->GetLabel(i.InputBlock(0))); |
| 158 break; |
| 159 case kArchNop: |
| 160 // Don't emit code for nops.
| 161 break; |
| 162 case kArchRet: |
| 163 AssembleReturn(); |
| 164 break; |
| 165 case kArchStackPointer: |
| 166 __ mov(i.OutputRegister(), sp); |
| 167 break; |
| 168 case kArchTruncateDoubleToI: |
| 169 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0)); |
| 170 break; |
| 171 case kMipsAdd: |
| 172 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 173 break; |
| 174 case kMipsAddOvf: |
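| // On overflow the macro leaves a negative value in kCompareReg, whose
| // sign is tested by the branch and boolean assemblers below.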
| 175 __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), |
| 176 i.InputOperand(1), kCompareReg, kScratchReg); |
| 177 break; |
| 178 case kMipsSub: |
| 179 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 180 break; |
| 181 case kMipsSubOvf: |
| 182 __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), |
| 183 i.InputOperand(1), kCompareReg, kScratchReg); |
| 184 break; |
| 185 case kMipsMul: |
| 186 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 187 break; |
| 188 case kMipsDiv: |
| 189 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 190 break; |
| 191 case kMipsDivU: |
| 192 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 193 break; |
| 194 case kMipsMod: |
| 195 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 196 break; |
| 197 case kMipsModU: |
| 198 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 199 break; |
| 200 case kMipsAnd: |
| 201 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 202 break; |
| 203 case kMipsOr: |
| 204 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 205 break; |
| 206 case kMipsXor: |
| 207 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 208 break; |
| 209 case kMipsShl: |
| 210 if (instr->InputAt(1)->IsRegister()) { |
| 211 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| 212 } else { |
| 213 int32_t imm = i.InputOperand(1).immediate(); |
| 214 __ sll(i.OutputRegister(), i.InputRegister(0), imm); |
| 215 } |
| 216 break; |
| 217 case kMipsShr: |
| 218 if (instr->InputAt(1)->IsRegister()) { |
| 219 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| 220 } else { |
| 221 int32_t imm = i.InputOperand(1).immediate(); |
| 222 __ srl(i.OutputRegister(), i.InputRegister(0), imm); |
| 223 } |
| 224 break; |
| 225 case kMipsSar: |
| 226 if (instr->InputAt(1)->IsRegister()) { |
| 227 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| 228 } else { |
| 229 int32_t imm = i.InputOperand(1).immediate(); |
| 230 __ sra(i.OutputRegister(), i.InputRegister(0), imm); |
| 231 } |
| 232 break; |
| 233 case kMipsRor: |
| 234 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| 235 break; |
| 236 case kMipsTst: |
| 237 // Pseudo-instruction used for tst/branch.
| 238 __ And(kCompareReg, i.InputRegister(0), i.InputOperand(1)); |
| 239 break; |
| 240 case kMipsCmp: |
| 241 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
| 242 break; |
| 243 case kMipsMov: |
| 244 // TODO(plind): Should we combine mov/li like this, or use separate instr? |
| 245 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType |
| 246 if (HasRegisterInput(instr, 0)) { |
| 247 __ mov(i.OutputRegister(), i.InputRegister(0)); |
| 248 } else { |
| 249 __ li(i.OutputRegister(), i.InputOperand(0)); |
| 250 } |
| 251 break; |
| 252 |
| 253 case kMipsCmpD: |
| 254 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
| 255 break; |
| 256 case kMipsAddD: |
| 257 // TODO(plind): add special case: combine mult & add. |
| 258 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 259 i.InputDoubleRegister(1)); |
| 260 break; |
| 261 case kMipsSubD: |
| 262 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 263 i.InputDoubleRegister(1)); |
| 264 break; |
| 265 case kMipsMulD: |
| 266 // TODO(plind): add special case: right op is -1.0, see arm port. |
| 267 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 268 i.InputDoubleRegister(1)); |
| 269 break; |
| 270 case kMipsDivD: |
| 271 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 272 i.InputDoubleRegister(1)); |
| 273 break; |
| 274 case kMipsModD: { |
| 275 // TODO(bmeurer): We should really get rid of this special instruction, |
| 276 // and generate a CallAddress instruction instead. |
| 277 FrameScope scope(masm(), StackFrame::MANUAL); |
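| // Set up the two double arguments per the C ABI and call the runtime's
| // double-modulo helper.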
| 278 __ PrepareCallCFunction(0, 2, kScratchReg); |
| 279 __ MovToFloatParameters(i.InputDoubleRegister(0), |
| 280 i.InputDoubleRegister(1)); |
| 281 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
| 282 0, 2); |
| 283 // Move the result into the double result register.
| 284 __ MovFromFloatResult(i.OutputDoubleRegister()); |
| 285 break; |
| 286 } |
| 287 case kMipsSqrtD: { |
| 288 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| 289 break; |
| 290 } |
| 291 case kMipsCvtSD: { |
| 292 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); |
| 293 break; |
| 294 } |
| 295 case kMipsCvtDS: { |
| 296 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); |
| 297 break; |
| 298 } |
| 299 case kMipsCvtDW: { |
| 300 FPURegister scratch = kScratchDoubleReg; |
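| // Move the integer into an FPU register, then convert the word to double.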
| 301 __ mtc1(i.InputRegister(0), scratch); |
| 302 __ cvt_d_w(i.OutputDoubleRegister(), scratch); |
| 303 break; |
| 304 } |
| 305 case kMipsCvtDUw: { |
| 306 FPURegister scratch = kScratchDoubleReg; |
| 307 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); |
| 308 break; |
| 309 } |
| 310 case kMipsTruncWD: { |
| 311 FPURegister scratch = kScratchDoubleReg; |
| 312 // Other architectures use round-to-zero here, so we follow.
| 313 __ trunc_w_d(scratch, i.InputDoubleRegister(0)); |
| 314 __ mfc1(i.OutputRegister(), scratch); |
| 315 break; |
| 316 } |
| 317 case kMipsTruncUwD: { |
| 318 FPURegister scratch = kScratchDoubleReg; |
| 319 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function. |
| 320 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch); |
| 321 break; |
| 322 } |
| 323 // ... more basic instructions ... |
| 324 |
| 325 case kMipsLbu: |
| 326 __ lbu(i.OutputRegister(), i.MemoryOperand()); |
| 327 break; |
| 328 case kMipsLb: |
| 329 __ lb(i.OutputRegister(), i.MemoryOperand()); |
| 330 break; |
| 331 case kMipsSb: |
| 332 __ sb(i.InputRegister(2), i.MemoryOperand()); |
| 333 break; |
| 334 case kMipsLhu: |
| 335 __ lhu(i.OutputRegister(), i.MemoryOperand()); |
| 336 break; |
| 337 case kMipsLh: |
| 338 __ lh(i.OutputRegister(), i.MemoryOperand()); |
| 339 break; |
| 340 case kMipsSh: |
| 341 __ sh(i.InputRegister(2), i.MemoryOperand()); |
| 342 break; |
| 343 case kMipsLw: |
| 344 __ lw(i.OutputRegister(), i.MemoryOperand()); |
| 345 break; |
| 346 case kMipsSw: |
| 347 __ sw(i.InputRegister(2), i.MemoryOperand()); |
| 348 break; |
| 349 case kMipsLwc1: { |
| 350 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand()); |
| 351 break; |
| 352 } |
| 353 case kMipsSwc1: { |
| 354 int index = 0; |
| 355 MemOperand operand = i.MemoryOperand(&index); |
| 356 __ swc1(i.InputSingleRegister(index), operand); |
| 357 break; |
| 358 } |
| 359 case kMipsLdc1: |
| 360 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); |
| 361 break; |
| 362 case kMipsSdc1: |
| 363 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand()); |
| 364 break; |
| 365 case kMipsPush: |
| 366 __ Push(i.InputRegister(0)); |
| 367 break; |
| 368 case kMipsStoreWriteBarrier: |
| 369 Register object = i.InputRegister(0); |
| 370 Register index = i.InputRegister(1); |
| 371 Register value = i.InputRegister(2); |
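| // Compute the effective address, perform the store, then record the
| // write so the GC write barrier sees it.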
| 372 __ addu(index, object, index); |
| 373 __ sw(value, MemOperand(index)); |
| 374 SaveFPRegsMode mode = |
| 375 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; |
| 376 RAStatus ra_status = kRAHasNotBeenSaved; |
| 377 __ RecordWrite(object, index, value, ra_status, mode); |
| 378 break; |
| 379 } |
| 380 } |
| 381 |
| 382 |
| 383 #define UNSUPPORTED_COND(opcode, condition) \ |
| 384 OFStream out(stdout); \ |
| 385 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \ |
| 386 UNIMPLEMENTED(); |
| 387 |
| 388 // Assembles branches after an instruction. |
| 389 void CodeGenerator::AssembleArchBranch(Instruction* instr, |
| 390 FlagsCondition condition) { |
| 391 MipsOperandConverter i(this, instr); |
| 392 Label done; |
| 393 |
| 394 // Emit a branch. The true and false targets are always the last two inputs |
| 395 // to the instruction. |
| 396 BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); |
| 397 BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); |
| 398 bool fallthru = IsNextInAssemblyOrder(fblock); |
| 399 Label* tlabel = code()->GetLabel(tblock); |
| 400 Label* flabel = fallthru ? &done : code()->GetLabel(fblock); |
| 401 Condition cc = kNoCondition; |
| 402 |
| 403 // MIPS does not have condition code flags, so compare and branch are
| 404 // implemented differently from other architectures. The compare operations
| 405 // emit MIPS pseudo-instructions, which are handled here by branch
| 406 // instructions that do the actual comparison. It is essential that the
| 407 // input registers of a compare pseudo-op are not modified before this
| 408 // branch op, as they are tested here.
| 409 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were |
| 410 // not separated by other instructions. |
| 411 |
| 412 if (instr->arch_opcode() == kMipsTst) { |
| 413 // The kMipsTst pseudo-instruction emits an And to 'kCompareReg'.
| 414 switch (condition) { |
| 415 case kNotEqual: |
| 416 cc = ne; |
| 417 break; |
| 418 case kEqual: |
| 419 cc = eq; |
| 420 break; |
| 421 default: |
| 422 UNSUPPORTED_COND(kMipsTst, condition); |
| 423 break; |
| 424 } |
| 425 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); |
| 426 |
| 427 } else if (instr->arch_opcode() == kMipsAddOvf || |
| 428 instr->arch_opcode() == kMipsSubOvf) { |
| 429 // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on overflow.
| 430 switch (condition) { |
| 431 case kOverflow: |
| 432 cc = lt; |
| 433 break; |
| 434 case kNotOverflow: |
| 435 cc = ge; |
| 436 break; |
| 437 default: |
| 438 UNSUPPORTED_COND(kMipsAddOvf, condition); |
| 439 break; |
| 440 } |
| 441 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); |
| 442 |
| 443 } else if (instr->arch_opcode() == kMipsCmp) { |
| 444 switch (condition) { |
| 445 case kEqual: |
| 446 cc = eq; |
| 447 break; |
| 448 case kNotEqual: |
| 449 cc = ne; |
| 450 break; |
| 451 case kSignedLessThan: |
| 452 cc = lt; |
| 453 break; |
| 454 case kSignedGreaterThanOrEqual: |
| 455 cc = ge; |
| 456 break; |
| 457 case kSignedLessThanOrEqual: |
| 458 cc = le; |
| 459 break; |
| 460 case kSignedGreaterThan: |
| 461 cc = gt; |
| 462 break; |
| 463 case kUnsignedLessThan: |
| 464 cc = lo; |
| 465 break; |
| 466 case kUnsignedGreaterThanOrEqual: |
| 467 cc = hs; |
| 468 break; |
| 469 case kUnsignedLessThanOrEqual: |
| 470 cc = ls; |
| 471 break; |
| 472 case kUnsignedGreaterThan: |
| 473 cc = hi; |
| 474 break; |
| 475 default: |
| 476 UNSUPPORTED_COND(kMipsCmp, condition); |
| 477 break; |
| 478 } |
| 479 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); |
| 480 |
| 481 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. |
| 482 __ bind(&done); |
| 483 |
| 484 } else if (instr->arch_opcode() == kMipsCmpD) { |
| 485 // TODO(dusmil): Optimize unordered checks to use fewer instructions,
| 486 // even if we have to unfold the BranchF macro.
| 487 Label* nan = flabel; |
| 488 switch (condition) { |
| 489 case kUnorderedEqual: |
| 490 cc = eq; |
| 491 break; |
| 492 case kUnorderedNotEqual: |
| 493 cc = ne; |
| 494 nan = tlabel; |
| 495 break; |
| 496 case kUnorderedLessThan: |
| 497 cc = lt; |
| 498 break; |
| 499 case kUnorderedGreaterThanOrEqual: |
| 500 cc = ge; |
| 501 nan = tlabel; |
| 502 break; |
| 503 case kUnorderedLessThanOrEqual: |
| 504 cc = le; |
| 505 break; |
| 506 case kUnorderedGreaterThan: |
| 507 cc = gt; |
| 508 nan = tlabel; |
| 509 break; |
| 510 default: |
| 511 UNSUPPORTED_COND(kMipsCmpD, condition); |
| 512 break; |
| 513 } |
| 514 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0), |
| 515 i.InputDoubleRegister(1)); |
| 516 |
| 517 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. |
| 518 __ bind(&done); |
| 519 |
| 520 } else { |
| 521 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", |
| 522 instr->arch_opcode()); |
| 523 UNIMPLEMENTED(); |
| 524 } |
| 525 } |
| 526 |
| 527 |
| 528 // Assembles boolean materializations after an instruction. |
| 529 void CodeGenerator::AssembleArchBoolean(Instruction* instr, |
| 530 FlagsCondition condition) { |
| 531 MipsOperandConverter i(this, instr); |
| 532 Label done; |
| 533 |
| 534 // Materialize a full 32-bit 1 or 0 value. The result register is always the |
| 535 // last output of the instruction. |
| 536 Label false_value; |
| 537 DCHECK_NE(0, instr->OutputCount()); |
| 538 Register result = i.OutputRegister(instr->OutputCount() - 1); |
| 539 Condition cc = kNoCondition; |
| 540 |
| 541 // MIPS does not have condition code flags, so compare and branch are
| 542 // implemented differently from other architectures. The compare operations
| 543 // emit MIPS pseudo-instructions, which are checked and handled here.
| 544
| 545 // For materializations, we use the delay slot to set the result true; in
| 546 // the false case, where we fall through the branch, we reset the result
| 547 // to false.
| 548 |
| 549 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were |
| 550 // not separated by other instructions. |
| 551 if (instr->arch_opcode() == kMipsTst) { |
| 552 // The kMipsTst pseudo-instruction emits an And to 'kCompareReg'.
| 553 switch (condition) { |
| 554 case kNotEqual: |
| 555 cc = ne; |
| 556 break; |
| 557 case kEqual: |
| 558 cc = eq; |
| 559 break; |
| 560 default: |
| 561 UNSUPPORTED_COND(kMipsTst, condition); |
| 562 break; |
| 563 } |
| 564 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg)); |
| 565 __ li(result, Operand(1)); // In delay slot. |
| 566 |
| 567 } else if (instr->arch_opcode() == kMipsAddOvf || |
| 568 instr->arch_opcode() == kMipsSubOvf) { |
| 569 // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on overflow.
| 570 switch (condition) { |
| 571 case kOverflow: |
| 572 cc = lt; |
| 573 break; |
| 574 case kNotOverflow: |
| 575 cc = ge; |
| 576 break; |
| 577 default: |
| 578 UNSUPPORTED_COND(kMipsAddOvf, condition); |
| 579 break; |
| 580 } |
| 581 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg)); |
| 582 __ li(result, Operand(1)); // In delay slot. |
| 583
| 585 } else if (instr->arch_opcode() == kMipsCmp) { |
| 586 Register left = i.InputRegister(0); |
| 587 Operand right = i.InputOperand(1); |
| 588 switch (condition) { |
| 589 case kEqual: |
| 590 cc = eq; |
| 591 break; |
| 592 case kNotEqual: |
| 593 cc = ne; |
| 594 break; |
| 595 case kSignedLessThan: |
| 596 cc = lt; |
| 597 break; |
| 598 case kSignedGreaterThanOrEqual: |
| 599 cc = ge; |
| 600 break; |
| 601 case kSignedLessThanOrEqual: |
| 602 cc = le; |
| 603 break; |
| 604 case kSignedGreaterThan: |
| 605 cc = gt; |
| 606 break; |
| 607 case kUnsignedLessThan: |
| 608 cc = lo; |
| 609 break; |
| 610 case kUnsignedGreaterThanOrEqual: |
| 611 cc = hs; |
| 612 break; |
| 613 case kUnsignedLessThanOrEqual: |
| 614 cc = ls; |
| 615 break; |
| 616 case kUnsignedGreaterThan: |
| 617 cc = hi; |
| 618 break; |
| 619 default: |
| 620 UNSUPPORTED_COND(kMipsCmp, condition); |
| 621 break; |
| 622 } |
| 623 __ Branch(USE_DELAY_SLOT, &done, cc, left, right); |
| 624 __ li(result, Operand(1)); // In delay slot. |
| 625 |
| 626 } else if (instr->arch_opcode() == kMipsCmpD) { |
| 627 FPURegister left = i.InputDoubleRegister(0); |
| 628 FPURegister right = i.InputDoubleRegister(1); |
| 629 // TODO(plind): Provide a NaN-testing macro-asm function that does not
| 630 // need BranchF.
| 631 FPURegister dummy1 = f0; |
| 632 FPURegister dummy2 = f2; |
| 633 switch (condition) { |
| 634 case kUnorderedEqual: |
| 635 // TODO(plind): improve the NaN testing throughout this function. |
| 636 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); |
| 637 cc = eq; |
| 638 break; |
| 639 case kUnorderedNotEqual: |
| 640 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); |
| 641 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. |
| 642 cc = ne; |
| 643 break; |
| 644 case kUnorderedLessThan: |
| 645 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); |
| 646 cc = lt; |
| 647 break; |
| 648 case kUnorderedGreaterThanOrEqual: |
| 649 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); |
| 650 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. |
| 651 cc = ge; |
| 652 break; |
| 653 case kUnorderedLessThanOrEqual: |
| 654 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); |
| 655 cc = le; |
| 656 break; |
| 657 case kUnorderedGreaterThan: |
| 658 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); |
| 659 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. |
| 660 cc = gt; |
| 661 break; |
| 662 default: |
| 663 UNSUPPORTED_COND(kMipsCmpD, condition);
| 664 break; |
| 665 } |
| 666 __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right); |
| 667 __ li(result, Operand(1)); // In delay slot - branch taken returns 1. |
| 668 // Fall-thru (branch not taken) returns 0. |
| 669 |
| 670 } else { |
| 671 PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
| 672 instr->arch_opcode()); |
| 673 TRACE_UNIMPL(); |
| 674 UNIMPLEMENTED(); |
| 675 } |
| 676 // The fall-through case is the false materialization.
| 677 __ bind(&false_value); |
| 678 __ li(result, Operand(0)); |
| 679 __ bind(&done); |
| 680 } |
| 681 |
| 682 |
| 683 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) { |
| 684 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( |
| 685 isolate(), deoptimization_id, Deoptimizer::LAZY); |
| 686 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); |
| 687 } |
| 688 |
| 689 |
| 690 void CodeGenerator::AssemblePrologue() { |
| 691 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
| 692 if (descriptor->kind() == CallDescriptor::kCallAddress) { |
| 693 __ Push(ra, fp); |
| 694 __ mov(fp, sp); |
| 695 const RegList saves = descriptor->CalleeSavedRegisters(); |
| 696 if (saves != 0) { // Save callee-saved registers. |
| 697 // TODO(plind): make callee save size const, possibly DCHECK it. |
| 698 int register_save_area_size = 0; |
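| // Count the callee-saved registers to size the save area.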
| 699 for (int i = Register::kNumRegisters - 1; i >= 0; i--) { |
| 700 if (!((1 << i) & saves)) continue; |
| 701 register_save_area_size += kPointerSize; |
| 702 } |
| 703 frame()->SetRegisterSaveAreaSize(register_save_area_size); |
| 704 __ MultiPush(saves); |
| 705 } |
| 706 } else if (descriptor->IsJSFunctionCall()) { |
| 707 CompilationInfo* info = linkage()->info(); |
| 708 __ Prologue(info->IsCodePreAgingActive()); |
| 709 frame()->SetRegisterSaveAreaSize( |
| 710 StandardFrameConstants::kFixedFrameSizeFromFp); |
| 711 |
| 712 // Sloppy mode functions and builtins need to replace the receiver with the |
| 713 // global proxy when called as functions (without an explicit receiver |
| 714 // object). |
| 715 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? |
| 716 if (info->strict_mode() == SLOPPY && !info->is_native()) { |
| 717 Label ok; |
| 718 // +2 for return address and saved frame pointer. |
| 719 int receiver_slot = info->scope()->num_parameters() + 2; |
| 720 __ lw(a2, MemOperand(fp, receiver_slot * kPointerSize)); |
| 721 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 722 __ Branch(&ok, ne, a2, Operand(at)); |
| 723 |
| 724 __ lw(a2, GlobalObjectOperand()); |
| 725 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); |
| 726 __ sw(a2, MemOperand(fp, receiver_slot * kPointerSize)); |
| 727 __ bind(&ok); |
| 728 } |
| 729 } else { |
| 730 __ StubPrologue(); |
| 731 frame()->SetRegisterSaveAreaSize( |
| 732 StandardFrameConstants::kFixedFrameSizeFromFp); |
| 733 } |
| 734 int stack_slots = frame()->GetSpillSlotCount(); |
| 735 if (stack_slots > 0) { |
| 736 __ Subu(sp, sp, Operand(stack_slots * kPointerSize)); |
| 737 } |
| 738 } |
| 739 |
| 740 |
| 741 void CodeGenerator::AssembleReturn() { |
| 742 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
| 743 if (descriptor->kind() == CallDescriptor::kCallAddress) { |
| 744 if (frame()->GetRegisterSaveAreaSize() > 0) { |
| 745 // Remove this frame's spill slots first. |
| 746 int stack_slots = frame()->GetSpillSlotCount(); |
| 747 if (stack_slots > 0) { |
| 748 __ Addu(sp, sp, Operand(stack_slots * kPointerSize)); |
| 749 } |
| 750 // Restore registers. |
| 751 const RegList saves = descriptor->CalleeSavedRegisters(); |
| 752 if (saves != 0) { |
| 753 __ MultiPop(saves); |
| 754 } |
| 755 } |
| 756 __ mov(sp, fp); |
| 757 __ Pop(ra, fp); |
| 758 __ Ret(); |
| 759 } else { |
| 760 __ mov(sp, fp); |
| 761 __ Pop(ra, fp); |
| 762 int pop_count = descriptor->IsJSFunctionCall() |
| 763 ? static_cast<int>(descriptor->JSParameterCount()) |
| 764 : 0; |
| 765 __ DropAndRet(pop_count); |
| 766 } |
| 767 } |
| 768 |
| 769 |
| 770 void CodeGenerator::AssembleMove(InstructionOperand* source, |
| 771 InstructionOperand* destination) { |
| 772 MipsOperandConverter g(this, NULL); |
| 773 // Dispatch on the source and destination operand kinds. Not all |
| 774 // combinations are possible. |
| 775 if (source->IsRegister()) { |
| 776 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 777 Register src = g.ToRegister(source); |
| 778 if (destination->IsRegister()) { |
| 779 __ mov(g.ToRegister(destination), src); |
| 780 } else { |
| 781 __ sw(src, g.ToMemOperand(destination)); |
| 782 } |
| 783 } else if (source->IsStackSlot()) { |
| 784 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 785 MemOperand src = g.ToMemOperand(source); |
| 786 if (destination->IsRegister()) { |
| 787 __ lw(g.ToRegister(destination), src); |
| 788 } else { |
| 789 Register temp = kScratchReg; |
| 790 __ lw(temp, src); |
| 791 __ sw(temp, g.ToMemOperand(destination)); |
| 792 } |
| 793 } else if (source->IsConstant()) { |
| 794 Constant src = g.ToConstant(source); |
| 795 if (destination->IsRegister() || destination->IsStackSlot()) { |
| 796 Register dst = |
| 797 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; |
| 798 switch (src.type()) { |
| 799 case Constant::kInt32: |
| 800 __ li(dst, Operand(src.ToInt32())); |
| 801 break; |
| 802 case Constant::kFloat32: |
| 803 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED)); |
| 804 break; |
| 805 case Constant::kInt64: |
| 806 UNREACHABLE(); |
| 807 break; |
| 808 case Constant::kFloat64: |
| 809 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); |
| 810 break; |
| 811 case Constant::kExternalReference: |
| 812 __ li(dst, Operand(src.ToExternalReference())); |
| 813 break; |
| 814 case Constant::kHeapObject: |
| 815 __ li(dst, src.ToHeapObject()); |
| 816 break; |
| 817 } |
| 818 if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination)); |
| 819 } else if (src.type() == Constant::kFloat32) { |
| 820 FPURegister dst = destination->IsDoubleRegister() |
| 821 ? g.ToDoubleRegister(destination) |
| 822 : kScratchDoubleReg.low(); |
| 823 // TODO(turbofan): Can we do better here? |
| 824 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32()))); |
| 825 __ mtc1(at, dst); |
| 826 if (destination->IsDoubleStackSlot()) { |
| 827 __ swc1(dst, g.ToMemOperand(destination)); |
| 828 } |
| 829 } else { |
| 830 DCHECK_EQ(Constant::kFloat64, src.type()); |
| 831 DoubleRegister dst = destination->IsDoubleRegister() |
| 832 ? g.ToDoubleRegister(destination) |
| 833 : kScratchDoubleReg; |
| 834 __ Move(dst, src.ToFloat64()); |
| 835 if (destination->IsDoubleStackSlot()) { |
| 836 __ sdc1(dst, g.ToMemOperand(destination)); |
| 837 } |
| 838 } |
| 839 } else if (source->IsDoubleRegister()) { |
| 840 FPURegister src = g.ToDoubleRegister(source); |
| 841 if (destination->IsDoubleRegister()) { |
| 842 FPURegister dst = g.ToDoubleRegister(destination); |
| 843 __ Move(dst, src); |
| 844 } else { |
| 845 DCHECK(destination->IsDoubleStackSlot()); |
| 846 __ sdc1(src, g.ToMemOperand(destination)); |
| 847 } |
| 848 } else if (source->IsDoubleStackSlot()) { |
| 849 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); |
| 850 MemOperand src = g.ToMemOperand(source); |
| 851 if (destination->IsDoubleRegister()) { |
| 852 __ ldc1(g.ToDoubleRegister(destination), src); |
| 853 } else { |
| 854 FPURegister temp = kScratchDoubleReg; |
| 855 __ ldc1(temp, src); |
| 856 __ sdc1(temp, g.ToMemOperand(destination)); |
| 857 } |
| 858 } else { |
| 859 UNREACHABLE(); |
| 860 } |
| 861 } |
| 862 |
| 863 |
| 864 void CodeGenerator::AssembleSwap(InstructionOperand* source, |
| 865 InstructionOperand* destination) { |
| 866 MipsOperandConverter g(this, NULL); |
| 867 // Dispatch on the source and destination operand kinds. Not all |
| 868 // combinations are possible. |
| 869 if (source->IsRegister()) { |
| 870 // Register-register. |
| 871 Register temp = kScratchReg; |
| 872 Register src = g.ToRegister(source); |
| 873 if (destination->IsRegister()) { |
| 874 Register dst = g.ToRegister(destination); |
| 875 __ Move(temp, src); |
| 876 __ Move(src, dst); |
| 877 __ Move(dst, temp); |
| 878 } else { |
| 879 DCHECK(destination->IsStackSlot()); |
| 880 MemOperand dst = g.ToMemOperand(destination); |
| 881 __ mov(temp, src); |
| 882 __ lw(src, dst); |
| 883 __ sw(temp, dst); |
| 884 } |
| 885 } else if (source->IsStackSlot()) { |
| 886 DCHECK(destination->IsStackSlot()); |
| 887 Register temp_0 = kScratchReg; |
| 888 Register temp_1 = kCompareReg; |
| 889 MemOperand src = g.ToMemOperand(source); |
| 890 MemOperand dst = g.ToMemOperand(destination); |
| 891 __ lw(temp_0, src); |
| 892 __ lw(temp_1, dst); |
| 893 __ sw(temp_0, dst); |
| 894 __ sw(temp_1, src); |
| 895 } else if (source->IsDoubleRegister()) { |
| 896 FPURegister temp = kScratchDoubleReg; |
| 897 FPURegister src = g.ToDoubleRegister(source); |
| 898 if (destination->IsDoubleRegister()) { |
| 899 FPURegister dst = g.ToDoubleRegister(destination); |
| 900 __ Move(temp, src); |
| 901 __ Move(src, dst); |
| 902 __ Move(dst, temp); |
| 903 } else { |
| 904 DCHECK(destination->IsDoubleStackSlot()); |
| 905 MemOperand dst = g.ToMemOperand(destination); |
| 906 __ Move(temp, src); |
| 907 __ ldc1(src, dst); |
| 908 __ sdc1(temp, dst); |
| 909 } |
| 910 } else if (source->IsDoubleStackSlot()) { |
| 911 DCHECK(destination->IsDoubleStackSlot()); |
| 912 Register temp_0 = kScratchReg; |
| 913 FPURegister temp_1 = kScratchDoubleReg; |
| 914 MemOperand src0 = g.ToMemOperand(source); |
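| // On 32-bit MIPS a double stack slot spans two consecutive words.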
| 915 MemOperand src1(src0.rm(), src0.offset() + kPointerSize); |
| 916 MemOperand dst0 = g.ToMemOperand(destination); |
| 917 MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize); |
| 918 __ ldc1(temp_1, dst0); // Save destination in temp_1. |
| 919 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. |
| 920 __ sw(temp_0, dst0); |
| 921 __ lw(temp_0, src1); |
| 922 __ sw(temp_0, dst1); |
| 923 __ sdc1(temp_1, src0); |
| 924 } else { |
| 925 // No other combinations are possible. |
| 926 UNREACHABLE(); |
| 927 } |
| 928 } |
| 929 |
| 930 |
| 931 void CodeGenerator::AddNopForSmiCodeInlining() { |
| 932 // Unused on 32-bit ARM. Still exists on 64-bit ARM.
| 933 // TODO(plind): Unclear when this is called now. Understand, fix if needed. |
| 934 __ nop(); // Maybe PROPERTY_ACCESS_INLINED? |
| 935 } |
| 936 |
| 937 |
| 938 void CodeGenerator::EnsureSpaceForLazyDeopt() { |
| 939 int space_needed = Deoptimizer::patch_size(); |
| 940 if (!linkage()->info()->IsStub()) { |
| 941 // Ensure that we have enough space after the previous lazy-bailout |
| 942 // instruction for patching the code here. |
| 943 int current_pc = masm()->pc_offset(); |
| 944 if (current_pc < last_lazy_deopt_pc_ + space_needed) { |
| 945 // Block trampoline pool emission for the duration of the padding.
| 946 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( |
| 947 masm()); |
| 948 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
| 949 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize); |
| 950 while (padding_size > 0) { |
| 951 __ nop(); |
| 952 padding_size -= v8::internal::Assembler::kInstrSize; |
| 953 } |
| 954 } |
| 955 } |
| 956 MarkLazyDeoptSite(); |
| 957 } |
| 958 |
| 959 #undef __ |
| 960 |
| 961 } // namespace compiler |
| 962 } // namespace internal |
| 963 } // namespace v8 |