Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "src/compiler/code-generator.h" | |
| 6 #include "src/compiler/code-generator-impl.h" | |
| 7 #include "src/compiler/gap-resolver.h" | |
| 8 #include "src/compiler/node-matchers.h" | |
| 9 #include "src/compiler/node-properties-inl.h" | |
| 10 #include "src/mips/macro-assembler-mips.h" | |
| 11 #include "src/scopes.h" | |
| 12 | |
| 13 namespace v8 { | |
| 14 namespace internal { | |
| 15 namespace compiler { | |
| 16 | |
| 17 #define __ masm()-> | |
| 18 | |
| 19 | |
| 20 // TODO(plind): Possibly avoid using these lithium names. | |
| 21 #define kScratchReg kLithiumScratchReg | |
| 22 #define kCompareReg kLithiumScratchReg2 | |
| 23 #define kScratchDoubleReg kLithiumScratchDouble | |
| 24 | |
| 25 | |
| 26 // TODO(plind): consider renaming these macros. | |
| 27 #define TRACE_MSG(msg) \ | |
| 28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ | |
| 29 __LINE__) | |
| 30 | |
| 31 #define TRACE_UNIMPL() \ | |
| 32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ | |
| 33 __LINE__) | |
| 34 | |
| 35 | |
| 36 // Adds Mips-specific methods to convert InstructionOperands. | |
| 37 class MipsOperandConverter : public InstructionOperandConverter { | |
| 38 public: | |
| 39 MipsOperandConverter(CodeGenerator* gen, Instruction* instr) | |
| 40 : InstructionOperandConverter(gen, instr) {} | |
| 41 | |
| 42 Operand InputImmediate(int index) { | |
| 43 Constant constant = ToConstant(instr_->InputAt(index)); | |
| 44 switch (constant.type()) { | |
| 45 case Constant::kInt32: | |
| 46 return Operand(constant.ToInt32()); | |
| 47 case Constant::kFloat64: | |
| 48 return Operand( | |
| 49 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); | |
| 50 case Constant::kInt64: | |
| 51 case Constant::kExternalReference: | |
| 52 case Constant::kHeapObject: | |
| 53 // TODO(plind): Maybe we should handle ExtRef & HeapObj here? | |
| 54 // maybe not done on arm due to const pool ?? | |
| 55 break; | |
| 56 } | |
| 57 UNREACHABLE(); | |
| 58 return Operand(zero_reg); | |
| 59 } | |
| 60 | |
| 61 Operand InputOperand(int index) { | |
| 62 InstructionOperand* op = instr_->InputAt(index); | |
| 63 if (op->IsRegister()) { | |
| 64 return Operand(ToRegister(op)); | |
| 65 } | |
| 66 return InputImmediate(index); | |
| 67 } | |
| 68 | |
| 69 MemOperand MemoryOperand(int* first_index) { | |
| 70 const int index = *first_index; | |
| 71 switch (AddressingModeField::decode(instr_->opcode())) { | |
| 72 case kMode_None: | |
| 73 break; | |
| 74 case kMode_MRI: | |
| 75 *first_index += 2; | |
| 76 return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); | |
| 77 case kMode_MRR: | |
| 78 // TODO(plind): r6 address mode, to be implemented ... | |
| 79 UNREACHABLE(); | |
| 80 } | |
| 81 UNREACHABLE(); | |
| 82 return MemOperand(no_reg); | |
| 83 } | |
| 84 | |
| 85 MemOperand MemoryOperand() { | |
| 86 int index = 0; | |
| 87 return MemoryOperand(&index); | |
| 88 } | |
| 89 | |
| 90 MemOperand ToMemOperand(InstructionOperand* op) const { | |
| 91 DCHECK(op != NULL); | |
| 92 DCHECK(!op->IsRegister()); | |
| 93 DCHECK(!op->IsDoubleRegister()); | |
| 94 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); | |
| 95 // The linkage computes where all spill slots are located. | |
| 96 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); | |
| 97 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); | |
| 98 } | |
| 99 }; | |
| 100 | |
| 101 | |
| 102 static inline bool HasRegisterInput(Instruction* instr, int index) { | |
| 103 return instr->InputAt(index)->IsRegister(); | |
| 104 } | |
| 105 | |
| 106 | |
| 107 // TODO(plind): There are only 3 shift ops; does that justify this slightly | |
| 108 // messy macro? Consider expanding it in place for the sll, srl, sra ops. | |
titzer (2014/09/24 15:23:31):
I agree that you should kill this macro here.

paul.l... (2014/09/25 17:18:38):
Done.
| 109 #define ASSEMBLE_SHIFT(asm_instr) \ | |
| 110 do { \ | |
| 111 if (instr->InputAt(1)->IsRegister()) { \ | |
| 112 __ asm_instr##v(i.OutputRegister(), i.InputRegister(0), \ | |
| 113 i.InputRegister(1)); \ | |
| 114 } else { \ | |
| 115 int32_t imm = i.InputOperand(1).immediate(); \ | |
| 116 __ asm_instr(i.OutputRegister(), i.InputRegister(0), imm); \ | |
| 117 } \ | |
| 118 } while (0); | |
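Per the review exchange above, the macro was to be removed in favor of expanding each shift case in place. For illustration, this is roughly what the expanded kMipsShl case would look like (an editor's sketch of the macro expansion, not code from this CL):

```cpp
case kMipsShl:
  if (instr->InputAt(1)->IsRegister()) {
    // Shift amount in a register: use the variable-shift form.
    __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
  } else {
    // Shift amount is an immediate encoded in the instruction.
    int32_t imm = i.InputOperand(1).immediate();
    __ sll(i.OutputRegister(), i.InputRegister(0), imm);
  }
  break;
```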
| 119 | |
| 120 | |
| 121 // Assembles an instruction after register allocation, producing machine code. | |
| 122 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { | |
| 123 MipsOperandConverter i(this, instr); | |
| 124 InstructionCode opcode = instr->opcode(); | |
| 125 | |
| 126 switch (ArchOpcodeField::decode(opcode)) { | |
| 127 case kArchCallCodeObject: { | |
| 128 EnsureSpaceForLazyDeopt(); | |
| 129 if (instr->InputAt(0)->IsImmediate()) { | |
| 130 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), | |
| 131 RelocInfo::CODE_TARGET); | |
| 132 } else { | |
| 133 __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); | |
| 134 __ Call(at); | |
| 135 } | |
| 136 AddSafepointAndDeopt(instr); | |
| 137 break; | |
| 138 } | |
| 139 case kArchCallJSFunction: { | |
| 140 EnsureSpaceForLazyDeopt(); | |
| 141 Register func = i.InputRegister(0); | |
| 142 if (FLAG_debug_code) { | |
| 143 // Check the function's context matches the context argument. | |
| 144 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); | |
| 145 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg)); | |
| 146 } | |
| 147 | |
| 148 __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); | |
| 149 __ Call(at); | |
| 150 AddSafepointAndDeopt(instr); | |
| 151 break; | |
| 152 } | |
| 153 case kArchJmp: | |
| 154 __ Branch(code_->GetLabel(i.InputBlock(0))); | |
| 155 break; | |
| 156 case kArchNop: | |
| 157 // don't emit code for nops. | |
| 158 break; | |
| 159 case kArchRet: | |
| 160 AssembleReturn(); | |
| 161 break; | |
| 162 case kArchTruncateDoubleToI: | |
| 163 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0)); | |
| 164 break; | |
| 165 case kMipsAdd: | |
| 166 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 167 break; | |
| 168 case kMipsAddOvf: | |
| 169 __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), | |
| 170 i.InputOperand(1), kCompareReg, kScratchReg); | |
titzer (2014/09/24 15:23:31):
The macro assembler methods here generate a lot of

paul.l... (2014/09/25 17:18:39):
Yes, Overflow checking is an issue for MIPS. We ha

titzer (2014/09/26 10:36:21):
I meant in the front end, when we lower a v8 repre
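The thread above concerns the cost of overflow checking on MIPS, which has no overflow flag. As later comments in this file note, AdduAndCheckForOverflow/SubuAndCheckForOverflow leave a negative value in kCompareReg when overflow occurs. The sign-bit test that scheme relies on can be sketched standalone (editor's illustration, not code from this CL):

```cpp
// Signed 32-bit addition overflows exactly when both operands have the same
// sign but the result's sign differs; (res ^ lhs) & (res ^ rhs) is then
// negative, which is the "negative kCompareReg on overflow" encoding.
#include <cstdint>

static inline bool SignedAddOverflows(int32_t lhs, int32_t rhs) {
  int32_t res = static_cast<int32_t>(static_cast<uint32_t>(lhs) +
                                     static_cast<uint32_t>(rhs));
  return ((res ^ lhs) & (res ^ rhs)) < 0;
}

int main() {
  return SignedAddOverflows(0x7fffffff, 1) && !SignedAddOverflows(1, 2) ? 0 : 1;
}
```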
| 171 break; | |
| 172 case kMipsSub: | |
| 173 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 174 break; | |
| 175 case kMipsSubOvf: | |
| 176 __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), | |
| 177 i.InputOperand(1), kCompareReg, kScratchReg); | |
| 178 break; | |
| 179 case kMipsMul: | |
| 180 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 181 break; | |
| 182 case kMipsDiv: | |
| 183 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 184 break; | |
| 185 case kMipsDivU: | |
| 186 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 187 break; | |
| 188 case kMipsMod: | |
| 189 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 190 break; | |
| 191 case kMipsModU: | |
| 192 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 193 break; | |
| 194 case kMipsAnd: | |
| 195 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 196 break; | |
| 197 case kMipsOr: | |
| 198 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 199 break; | |
| 200 case kMipsXor: | |
| 201 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 202 break; | |
| 203 case kMipsShl: | |
| 204 ASSEMBLE_SHIFT(sll); | |
| 205 break; | |
| 206 case kMipsShr: | |
| 207 ASSEMBLE_SHIFT(srl); | |
| 208 break; | |
| 209 case kMipsSar: | |
| 210 ASSEMBLE_SHIFT(sra); | |
| 211 break; | |
| 212 case kMipsRor: | |
| 213 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); | |
| 214 break; | |
| 215 case kMipsTst: | |
| 216 // Pseudo-instruction used for tst/branch. | |
| 217 __ And(kCompareReg, i.InputRegister(0), i.InputOperand(1)); | |
| 218 break; | |
| 219 case kMipsCmp: | |
| 220 // Pseudo-instruction used for cmp/branch. No opcode emitted here. | |
| 221 break; | |
| 222 case kMipsMov: | |
| 223 // TODO(plind): Should we combine mov/li like this, or use separate instr? | |
| 224 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType | |
| 225 if (HasRegisterInput(instr, 0)) { | |
| 226 __ mov(i.OutputRegister(), i.InputRegister(0)); | |
| 227 } else { | |
| 228 __ li(i.OutputRegister(), i.InputOperand(0)); | |
| 229 } | |
| 230 break; | |
| 231 | |
| 232 case kMipsFloat64Cmp: | |
| 233 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. | |
| 234 break; | |
| 235 case kMipsFloat64Add: | |
| 236 // TODO(plind): add special case: combine mult & add. | |
| 237 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | |
| 238 i.InputDoubleRegister(1)); | |
| 239 break; | |
| 240 case kMipsFloat64Sub: | |
| 241 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | |
| 242 i.InputDoubleRegister(1)); | |
| 243 break; | |
| 244 case kMipsFloat64Mul: | |
| 245 // TODO(plind): add special case: right op is -1.0, see arm port. | |
| 246 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | |
| 247 i.InputDoubleRegister(1)); | |
| 248 break; | |
| 249 case kMipsFloat64Div: | |
| 250 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | |
| 251 i.InputDoubleRegister(1)); | |
| 252 break; | |
| 253 case kMipsFloat64Mod: { | |
| 254 // TODO(bmeurer): We should really get rid of this special instruction, | |
| 255 // and generate a CallAddress instruction instead. | |
| 256 FrameScope scope(masm(), StackFrame::MANUAL); | |
| 257 __ PrepareCallCFunction(0, 2, kScratchReg); | |
| 258 __ MovToFloatParameters(i.InputDoubleRegister(0), | |
| 259 i.InputDoubleRegister(1)); | |
| 260 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), | |
| 261 0, 2); | |
| 262 // Move the result into the double result register. | |
| 263 __ MovFromFloatResult(i.OutputDoubleRegister()); | |
| 264 break; | |
| 265 } | |
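The kMipsFloat64Mod case routes through a C runtime call that computes the floating-point remainder, with the sign following the dividend as in C's fmod. A standalone model of the value that call returns (editor's sketch, with made-up operands):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  double dividend = 7.5;
  double divisor = 2.0;
  double result = std::fmod(dividend, divisor);  // 1.5, sign follows dividend
  std::printf("fmod(%.1f, %.1f) = %.1f\n", dividend, divisor, result);
  return 0;
}
```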
| 266 case kMipsInt32ToFloat64: { | |
| 267 FPURegister scratch = kScratchDoubleReg; | |
| 268 __ mtc1(i.InputRegister(0), scratch); | |
| 269 __ cvt_d_w(i.OutputDoubleRegister(), scratch); | |
| 270 break; | |
| 271 } | |
| 272 case kMipsUint32ToFloat64: { | |
| 273 FPURegister scratch = kScratchDoubleReg; | |
| 274 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); | |
| 275 break; | |
| 276 } | |
| 277 case kMipsFloat64ToInt32: { | |
| 278 FPURegister scratch = kScratchDoubleReg; | |
| 279 // Other arches use round to zero here, so we follow. | |
| 280 __ trunc_w_d(scratch, i.InputDoubleRegister(0)); | |
| 281 __ mfc1(i.OutputRegister(), scratch); | |
| 282 break; | |
| 283 } | |
| 284 case kMipsFloat64ToUint32: { | |
| 285 FPURegister scratch = kScratchDoubleReg; | |
| 286 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function. | |
| 287 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch); | |
| 288 break; | |
| 289 } | |
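The two unsigned cases above go through dedicated macro-assembler helpers (Cvt_d_uw, Trunc_uw_d) rather than the plain mtc1/cvt_d_w pair used for kMipsInt32ToFloat64. The reason is that the signed path would misinterpret values with the top bit set; a standalone illustration (editor's sketch, not code from this CL):

```cpp
// A uint32 value >= 2^31 must not be converted through a signed 32-bit view,
// or it comes out negative.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t u = 0x80000000u;  // 2147483648
  double via_unsigned = static_cast<double>(u);                      // 2147483648.0
  double via_signed = static_cast<double>(static_cast<int32_t>(u));  // -2147483648.0
  std::printf("%.1f vs %.1f\n", via_unsigned, via_signed);
  return 0;
}
```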
| 290 // ... more basic instructions ... | |
| 291 | |
| 292 case kMipsLbu: | |
| 293 __ lbu(i.OutputRegister(), i.MemoryOperand()); | |
| 294 break; | |
| 295 case kMipsLb: | |
| 296 __ lb(i.OutputRegister(), i.MemoryOperand()); | |
| 297 break; | |
| 298 case kMipsSb: | |
| 299 __ sb(i.InputRegister(2), i.MemoryOperand()); | |
| 300 break; | |
| 301 case kMipsLhu: | |
| 302 __ lhu(i.OutputRegister(), i.MemoryOperand()); | |
| 303 break; | |
| 304 case kMipsLh: | |
| 305 __ lh(i.OutputRegister(), i.MemoryOperand()); | |
| 306 break; | |
| 307 case kMipsSh: | |
| 308 __ sh(i.InputRegister(2), i.MemoryOperand()); | |
| 309 break; | |
| 310 case kMipsLw: | |
| 311 __ lw(i.OutputRegister(), i.MemoryOperand()); | |
| 312 break; | |
| 313 case kMipsSw: | |
| 314 __ sw(i.InputRegister(2), i.MemoryOperand()); | |
| 315 break; | |
| 316 case kMipsLwc1: { | |
| 317 FPURegister scratch = kScratchDoubleReg; | |
| 318 __ lwc1(scratch, i.MemoryOperand()); | |
| 319 __ cvt_d_s(i.OutputDoubleRegister(), scratch); | |
| 320 break; | |
| 321 } | |
| 322 case kMipsSwc1: { | |
| 323 int index = 0; | |
| 324 FPURegister scratch = kScratchDoubleReg; | |
| 325 MemOperand operand = i.MemoryOperand(&index); | |
| 326 __ cvt_s_d(scratch, i.InputDoubleRegister(index)); | |
| 327 __ swc1(scratch, operand); | |
| 328 break; | |
| 329 } | |
| 330 case kMipsLdc1: | |
| 331 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); | |
| 332 break; | |
| 333 case kMipsSdc1: | |
| 334 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand()); | |
| 335 break; | |
| 336 case kMipsPush: | |
| 337 __ Push(i.InputRegister(0)); | |
| 338 break; | |
| 339 case kMipsStoreWriteBarrier: | |
| 340 Register object = i.InputRegister(0); | |
| 341 Register index = i.InputRegister(1); | |
| 342 Register value = i.InputRegister(2); | |
| 343 __ addu(index, object, index); | |
| 344 __ sw(value, MemOperand(index)); | |
| 345 SaveFPRegsMode mode = | |
| 346 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; | |
| 347 RAStatus ra_status = kRAHasNotBeenSaved; | |
| 348 __ RecordWrite(object, index, value, ra_status, mode); | |
| 349 break; | |
| 350 } | |
| 351 } | |
| 352 | |
| 353 | |
| 354 #define UNSUPPORTED_COND(opcode, condition) \ | |
| 355 OFStream out(stdout); \ | |
| 356 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \ | |
| 357 UNIMPLEMENTED(); | |
| 358 | |
| 359 // Assembles branches after an instruction. | |
| 360 void CodeGenerator::AssembleArchBranch(Instruction* instr, | |
| 361 FlagsCondition condition) { | |
titzer (2014/09/24 15:23:32):
The handling of branches here is tough to follow.

Benedikt Meurer (2014/09/25 05:37:10):
Hm, we may need to refactor the AssembleArchBranch

paul.l... (2014/09/25 17:18:39):
Complications arise from MIPS not having condition

titzer (2014/09/26 10:36:21):
Ok, in that case maybe using a FlagsContinuation o
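This thread (and the comment block later in this function) turns on MIPS having no condition-code flags, so a compare cannot simply record state for a following branch. A toy model of the difference, purely as an editor's illustration (not V8 code):

```cpp
#include <cstdint>

// Flags-style machine: the compare records state, the branch only reads it,
// so other instructions may sit between the two.
struct Flags { bool eq; bool lt; };
inline Flags Compare(int32_t a, int32_t b) { return {a == b, a < b}; }
inline bool BranchIfLessThan(const Flags& f) { return f.lt; }

// MIPS-style: the branch itself carries both operands, which is why the
// inputs of the compare pseudo-op must survive untouched until the branch.
inline bool BranchIfLessThan(int32_t a, int32_t b) { return a < b; }

int main() {
  return BranchIfLessThan(Compare(1, 2)) == BranchIfLessThan(1, 2) ? 0 : 1;
}
```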
| 362 MipsOperandConverter i(this, instr); | |
| 363 Label done; | |
| 364 | |
| 365 // Emit a branch. The true and false targets are always the last two inputs | |
| 366 // to the instruction. | |
| 367 BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); | |
| 368 BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); | |
| 369 bool fallthru = IsNextInAssemblyOrder(fblock); | |
| 370 Label* tlabel = code()->GetLabel(tblock); | |
| 371 Label* flabel = fallthru ? &done : code()->GetLabel(fblock); | |
| 372 Condition cc = kNoCondition; | |
| 373 | |
| 374 // MIPS does not have condition code flags, so compare and branch are | |
| 375 // implemented differently than on the other arches. The compare operations | |
| 376 // emit MIPS pseudo-instructions, which are handled here by branch | |
| 377 // instructions that do the actual comparison. It is essential that the input | |
| 378 // registers to the compare pseudo-op are not modified before this branch op, as | |
| 379 // they are tested here. | |
| 380 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were | |
| 381 // not separated by other instructions. | |
| 382 | |
| 383 if (instr->arch_opcode() == kMipsTst) { | |
| 384 // The kMipsTst pseudo-instruction emits an And to the 'kCompareReg' register. | |
| 385 switch (condition) { | |
| 386 case kNotEqual: | |
| 387 cc = ne; | |
| 388 break; | |
| 389 case kEqual: | |
| 390 cc = eq; | |
| 391 break; | |
| 392 default: | |
| 393 UNSUPPORTED_COND(kMipsTst, condition); | |
| 394 break; | |
| 395 } | |
| 396 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); | |
| 397 | |
| 398 } else if (instr->arch_opcode() == kMipsAddOvf || | |
| 399 instr->arch_opcode() == kMipsSubOvf) { | |
| 400 // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on overflow. | |
| 401 switch (condition) { | |
| 402 case kOverflow: | |
| 403 cc = lt; | |
| 404 break; | |
| 405 case kNotOverflow: | |
| 406 cc = ge; | |
| 407 break; | |
| 408 default: | |
| 409 UNSUPPORTED_COND(kMipsAddOvf, condition); | |
| 410 break; | |
| 411 } | |
| 412 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); | |
| 413 | |
| 414 } else if (instr->arch_opcode() == kMipsCmp) { | |
| 415 switch (condition) { | |
| 416 case kEqual: | |
| 417 cc = eq; | |
| 418 break; | |
| 419 case kNotEqual: | |
| 420 cc = ne; | |
| 421 break; | |
| 422 case kSignedLessThan: | |
| 423 cc = lt; | |
| 424 break; | |
| 425 case kSignedGreaterThanOrEqual: | |
| 426 cc = ge; | |
| 427 break; | |
| 428 case kSignedLessThanOrEqual: | |
| 429 cc = le; | |
| 430 break; | |
| 431 case kSignedGreaterThan: | |
| 432 cc = gt; | |
| 433 break; | |
| 434 case kUnsignedLessThan: | |
| 435 cc = lo; | |
| 436 break; | |
| 437 case kUnsignedGreaterThanOrEqual: | |
| 438 cc = hs; | |
| 439 break; | |
| 440 case kUnsignedLessThanOrEqual: | |
| 441 cc = ls; | |
| 442 break; | |
| 443 case kUnsignedGreaterThan: | |
| 444 cc = hi; | |
| 445 break; | |
| 446 default: | |
| 447 UNSUPPORTED_COND(kMipsCmp, condition); | |
| 448 break; | |
| 449 } | |
| 450 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); | |
| 451 | |
| 452 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. | |
| 453 __ bind(&done); | |
| 454 | |
| 455 } else if (instr->arch_opcode() == kMipsFloat64Cmp) { | |
| 456 // TODO(dusmil): Optimize unordered checks to use fewer instructions, | |
| 457 // even if we have to unfold BranchF macro. | |
| 458 Label* nan = flabel; | |
| 459 switch (condition) { | |
| 460 case kUnorderedEqual: | |
| 461 cc = eq; | |
| 462 break; | |
| 463 case kUnorderedNotEqual: | |
| 464 cc = ne; | |
| 465 nan = tlabel; | |
| 466 break; | |
| 467 case kUnorderedLessThan: | |
| 468 cc = lt; | |
| 469 break; | |
| 470 case kUnorderedGreaterThanOrEqual: | |
| 471 cc = ge; | |
| 472 nan = tlabel; | |
| 473 break; | |
| 474 case kUnorderedLessThanOrEqual: | |
| 475 cc = le; | |
| 476 break; | |
| 477 case kUnorderedGreaterThan: | |
| 478 cc = gt; | |
| 479 nan = tlabel; | |
| 480 break; | |
| 481 default: | |
| 482 UNSUPPORTED_COND(kMipsFloat64Cmp, condition); | |
| 483 break; | |
| 484 } | |
| 485 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0), | |
| 486 i.InputDoubleRegister(1)); | |
| 487 | |
| 488 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. | |
| 489 __ bind(&done); | |
| 490 | |
| 491 } else { | |
| 492 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", | |
| 493 instr->arch_opcode()); | |
| 494 UNIMPLEMENTED(); | |
| 495 } | |
| 496 } | |
| 497 | |
| 498 | |
| 499 // Assembles boolean materializations after an instruction. | |
| 500 void CodeGenerator::AssembleArchBoolean(Instruction* instr, | |
| 501 FlagsCondition condition) { | |
| 502 MipsOperandConverter i(this, instr); | |
| 503 Label done; | |
| 504 | |
| 505 // Materialize a full 32-bit 1 or 0 value. The result register is always the | |
| 506 // last output of the instruction. | |
| 507 Label false_value; | |
| 508 DCHECK_NE(0, instr->OutputCount()); | |
| 509 Register result = i.OutputRegister(instr->OutputCount() - 1); | |
| 510 Condition cc = kNoCondition; | |
| 511 | |
| 512 // MIPS does not have condition code flags, so compare and branch are | |
| 513 // implemented differently than on the other arches. The compare operations | |
| 514 // emit MIPS pseudo-instructions, which are checked and handled here. | |
| 515 | |
| 516 // For materializations, we use the delay slot to set the result to true; in | |
| 517 // the false case, where we fall through the branch, we reset the result to | |
| 518 // false. | |
| 519 | |
| 520 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were | |
| 521 // not separated by other instructions. | |
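The delay-slot scheme described in the comment above can be modeled standalone: the instruction in the delay slot runs whether or not the branch is taken, so it unconditionally writes 1, and the fall-through path then overwrites it with 0 (editor's sketch, not V8 code):

```cpp
#include <cassert>

// Models: Branch(USE_DELAY_SLOT, &done, cond); li(result, 1) in the delay
// slot; fall-through does li(result, 0); bind(&done).
int MaterializeBoolean(bool condition_holds) {
  int result = 1;           // delay slot: executes on both paths
  if (!condition_holds) {   // branch not taken, fall through
    result = 0;             // false materialization
  }
  return result;            // done:
}

int main() {
  assert(MaterializeBoolean(true) == 1);
  assert(MaterializeBoolean(false) == 0);
  return 0;
}
```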
| 522 if (instr->arch_opcode() == kMipsTst) { | |
| 523 // The kMipsTst pseudo-instruction emits an And to the 'kCompareReg' register. | |
| 524 switch (condition) { | |
| 525 case kNotEqual: | |
| 526 cc = ne; | |
| 527 break; | |
| 528 case kEqual: | |
| 529 cc = eq; | |
| 530 break; | |
| 531 default: | |
| 532 UNSUPPORTED_COND(kMipsTst, condition); | |
| 533 break; | |
| 534 } | |
| 535 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg)); | |
| 536 __ li(result, Operand(1)); // In delay slot. | |
| 537 | |
| 538 } else if (instr->arch_opcode() == kMipsAddOvf || | |
| 539 instr->arch_opcode() == kMipsSubOvf) { | |
| 540 // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on overflow. | |
| 541 switch (condition) { | |
| 542 case kOverflow: | |
| 543 cc = lt; | |
| 544 break; | |
| 545 case kNotOverflow: | |
| 546 cc = ge; | |
| 547 break; | |
| 548 default: | |
| 549 UNSUPPORTED_COND(kMipsAddOvf, condition); | |
| 550 break; | |
| 551 } | |
| 552 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg)); | |
| 553 __ li(result, Operand(1)); // In delay slot. | |
| 554 | |
| 555 | |
| 556 } else if (instr->arch_opcode() == kMipsCmp) { | |
| 557 Register left = i.InputRegister(0); | |
| 558 Operand right = i.InputOperand(1); | |
| 559 switch (condition) { | |
| 560 case kEqual: | |
| 561 cc = eq; | |
| 562 break; | |
| 563 case kNotEqual: | |
| 564 cc = ne; | |
| 565 break; | |
| 566 case kSignedLessThan: | |
| 567 cc = lt; | |
| 568 break; | |
| 569 case kSignedGreaterThanOrEqual: | |
| 570 cc = ge; | |
| 571 break; | |
| 572 case kSignedLessThanOrEqual: | |
| 573 cc = le; | |
| 574 break; | |
| 575 case kSignedGreaterThan: | |
| 576 cc = gt; | |
| 577 break; | |
| 578 case kUnsignedLessThan: | |
| 579 cc = lo; | |
| 580 break; | |
| 581 case kUnsignedGreaterThanOrEqual: | |
| 582 cc = hs; | |
| 583 break; | |
| 584 case kUnsignedLessThanOrEqual: | |
| 585 cc = ls; | |
| 586 break; | |
| 587 case kUnsignedGreaterThan: | |
| 588 cc = hi; | |
| 589 break; | |
| 590 default: | |
| 591 UNSUPPORTED_COND(kMipsCmp, condition); | |
| 592 break; | |
| 593 } | |
| 594 __ Branch(USE_DELAY_SLOT, &done, cc, left, right); | |
| 595 __ li(result, Operand(1)); // In delay slot. | |
| 596 | |
| 597 } else if (instr->arch_opcode() == kMipsFloat64Cmp) { | |
| 598 FPURegister left = i.InputDoubleRegister(0); | |
| 599 FPURegister right = i.InputDoubleRegister(1); | |
| 600 // TODO(plind): Provide NaN-testing macro-asm function without need for | |
| 601 // BranchF. | |
| 602 FPURegister dummy1 = f0; | |
| 603 FPURegister dummy2 = f2; | |
| 604 switch (condition) { | |
| 605 case kUnorderedEqual: | |
| 606 // TODO(plind): improve the NaN testing throughout this function. | |
| 607 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); | |
| 608 cc = eq; | |
| 609 break; | |
| 610 case kUnorderedNotEqual: | |
| 611 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); | |
| 612 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. | |
| 613 cc = ne; | |
| 614 break; | |
| 615 case kUnorderedLessThan: | |
| 616 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); | |
| 617 cc = lt; | |
| 618 break; | |
| 619 case kUnorderedGreaterThanOrEqual: | |
| 620 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); | |
| 621 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. | |
| 622 cc = ge; | |
| 623 break; | |
| 624 case kUnorderedLessThanOrEqual: | |
| 625 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); | |
| 626 cc = le; | |
| 627 break; | |
| 628 case kUnorderedGreaterThan: | |
| 629 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); | |
| 630 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. | |
| 631 cc = gt; | |
| 632 break; | |
| 633 default: | |
| 634 UNSUPPORTED_COND(kMipsCmp, condition); | |
| 635 break; | |
| 636 } | |
| 637 __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right); | |
| 638 __ li(result, Operand(1)); // In delay slot - branch taken returns 1. | |
| 639 // Fall-thru (branch not taken) returns 0. | |
| 640 | |
| 641 } else { | |
| 642 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", | |
| 643 instr->arch_opcode()); | |
| 644 TRACE_UNIMPL(); | |
| 645 UNIMPLEMENTED(); | |
| 646 } | |
| 647 // Fallthru case is the false materialization. | |
| 648 __ bind(&false_value); | |
| 649 __ li(result, Operand(0)); | |
| 650 __ bind(&done); | |
| 651 } | |
| 652 | |
| 653 | |
| 654 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) { | |
| 655 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( | |
| 656 isolate(), deoptimization_id, Deoptimizer::LAZY); | |
| 657 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); | |
| 658 } | |
| 659 | |
| 660 | |
| 661 void CodeGenerator::AssemblePrologue() { | |
| 662 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); | |
| 663 if (descriptor->kind() == CallDescriptor::kCallAddress) { | |
| 664 __ Push(ra, fp); | |
| 665 __ mov(fp, sp); | |
| 666 const RegList saves = descriptor->CalleeSavedRegisters(); | |
| 667 if (saves != 0) { // Save callee-saved registers. | |
| 668 // TODO(plind): make callee save size const, possibly DCHECK it. | |
| 669 int register_save_area_size = 0; | |
| 670 for (int i = Register::kNumRegisters - 1; i >= 0; i--) { | |
| 671 if (!((1 << i) & saves)) continue; | |
| 672 register_save_area_size += kPointerSize; | |
| 673 } | |
| 674 frame()->SetRegisterSaveAreaSize(register_save_area_size); | |
| 675 __ MultiPush(saves); | |
| 676 } | |
| 677 } else if (descriptor->IsJSFunctionCall()) { | |
| 678 CompilationInfo* info = linkage()->info(); | |
| 679 __ Prologue(info->IsCodePreAgingActive()); | |
| 680 frame()->SetRegisterSaveAreaSize( | |
| 681 StandardFrameConstants::kFixedFrameSizeFromFp); | |
| 682 | |
| 683 // Sloppy mode functions and builtins need to replace the receiver with the | |
| 684 // global proxy when called as functions (without an explicit receiver | |
| 685 // object). | |
| 686 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? | |
| 687 if (info->strict_mode() == SLOPPY && !info->is_native()) { | |
| 688 Label ok; | |
| 689 // +2 for return address and saved frame pointer. | |
| 690 int receiver_slot = info->scope()->num_parameters() + 2; | |
| 691 __ lw(a2, MemOperand(fp, receiver_slot * kPointerSize)); | |
| 692 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
| 693 __ Branch(&ok, ne, a2, Operand(at)); | |
| 694 | |
| 695 __ lw(a2, GlobalObjectOperand()); | |
| 696 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); | |
| 697 __ sw(a2, MemOperand(fp, receiver_slot * kPointerSize)); | |
| 698 __ bind(&ok); | |
| 699 } | |
| 700 } else { | |
| 701 __ StubPrologue(); | |
| 702 frame()->SetRegisterSaveAreaSize( | |
| 703 StandardFrameConstants::kFixedFrameSizeFromFp); | |
| 704 } | |
| 705 int stack_slots = frame()->GetSpillSlotCount(); | |
| 706 if (stack_slots > 0) { | |
| 707 __ Subu(sp, sp, Operand(stack_slots * kPointerSize)); | |
| 708 } | |
| 709 } | |
| 710 | |
| 711 | |
| 712 void CodeGenerator::AssembleReturn() { | |
| 713 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); | |
| 714 if (descriptor->kind() == CallDescriptor::kCallAddress) { | |
| 715 if (frame()->GetRegisterSaveAreaSize() > 0) { | |
| 716 // Remove this frame's spill slots first. | |
| 717 int stack_slots = frame()->GetSpillSlotCount(); | |
| 718 if (stack_slots > 0) { | |
| 719 __ Addu(sp, sp, Operand(stack_slots * kPointerSize)); | |
| 720 } | |
| 721 // Restore registers. | |
| 722 const RegList saves = descriptor->CalleeSavedRegisters(); | |
| 723 if (saves != 0) { | |
| 724 __ MultiPop(saves); | |
| 725 } | |
| 726 } | |
| 727 __ mov(sp, fp); | |
| 728 __ Pop(ra, fp); | |
| 729 __ Ret(); | |
| 730 } else { | |
| 731 __ mov(sp, fp); | |
| 732 __ Pop(ra, fp); | |
| 733 int pop_count = descriptor->IsJSFunctionCall() | |
| 734 ? static_cast<int>(descriptor->JSParameterCount()) | |
| 735 : 0; | |
| 736 __ DropAndRet(pop_count); | |
| 737 } | |
| 738 } | |
| 739 | |
| 740 | |
| 741 void CodeGenerator::AssembleMove(InstructionOperand* source, | |
| 742 InstructionOperand* destination) { | |
| 743 MipsOperandConverter g(this, NULL); | |
| 744 // Dispatch on the source and destination operand kinds. Not all | |
| 745 // combinations are possible. | |
| 746 if (source->IsRegister()) { | |
| 747 DCHECK(destination->IsRegister() || destination->IsStackSlot()); | |
| 748 Register src = g.ToRegister(source); | |
| 749 if (destination->IsRegister()) { | |
| 750 __ mov(g.ToRegister(destination), src); | |
| 751 } else { | |
| 752 __ sw(src, g.ToMemOperand(destination)); | |
| 753 } | |
| 754 } else if (source->IsStackSlot()) { | |
| 755 DCHECK(destination->IsRegister() || destination->IsStackSlot()); | |
| 756 MemOperand src = g.ToMemOperand(source); | |
| 757 if (destination->IsRegister()) { | |
| 758 __ lw(g.ToRegister(destination), src); | |
| 759 } else { | |
| 760 Register temp = kScratchReg; | |
| 761 __ lw(temp, src); | |
| 762 __ sw(temp, g.ToMemOperand(destination)); | |
| 763 } | |
| 764 } else if (source->IsConstant()) { | |
| 765 if (destination->IsRegister() || destination->IsStackSlot()) { | |
| 766 Register dst = | |
| 767 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; | |
| 768 Constant src = g.ToConstant(source); | |
| 769 switch (src.type()) { | |
| 770 case Constant::kInt32: | |
| 771 __ li(dst, Operand(src.ToInt32())); | |
| 772 break; | |
| 773 case Constant::kInt64: | |
| 774 UNREACHABLE(); | |
| 775 break; | |
| 776 case Constant::kFloat64: | |
| 777 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); | |
| 778 break; | |
| 779 case Constant::kExternalReference: | |
| 780 __ li(dst, Operand(src.ToExternalReference())); | |
| 781 break; | |
| 782 case Constant::kHeapObject: | |
| 783 __ li(dst, src.ToHeapObject()); | |
| 784 break; | |
| 785 } | |
| 786 if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination)); | |
| 787 } else if (destination->IsDoubleRegister()) { | |
| 788 FPURegister result = g.ToDoubleRegister(destination); | |
| 789 __ Move(result, g.ToDouble(source)); | |
| 790 } else { | |
| 791 DCHECK(destination->IsDoubleStackSlot()); | |
| 792 FPURegister temp = kScratchDoubleReg; | |
| 793 __ Move(temp, g.ToDouble(source)); | |
| 794 __ sdc1(temp, g.ToMemOperand(destination)); | |
| 795 } | |
| 796 } else if (source->IsDoubleRegister()) { | |
| 797 FPURegister src = g.ToDoubleRegister(source); | |
| 798 if (destination->IsDoubleRegister()) { | |
| 799 FPURegister dst = g.ToDoubleRegister(destination); | |
| 800 __ Move(dst, src); | |
| 801 } else { | |
| 802 DCHECK(destination->IsDoubleStackSlot()); | |
| 803 __ sdc1(src, g.ToMemOperand(destination)); | |
| 804 } | |
| 805 } else if (source->IsDoubleStackSlot()) { | |
| 806 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); | |
| 807 MemOperand src = g.ToMemOperand(source); | |
| 808 if (destination->IsDoubleRegister()) { | |
| 809 __ ldc1(g.ToDoubleRegister(destination), src); | |
| 810 } else { | |
| 811 FPURegister temp = kScratchDoubleReg; | |
| 812 __ ldc1(temp, src); | |
| 813 __ sdc1(temp, g.ToMemOperand(destination)); | |
| 814 } | |
| 815 } else { | |
| 816 UNREACHABLE(); | |
| 817 } | |
| 818 } | |
| 819 | |
| 820 | |
| 821 void CodeGenerator::AssembleSwap(InstructionOperand* source, | |
| 822 InstructionOperand* destination) { | |
| 823 MipsOperandConverter g(this, NULL); | |
| 824 // Dispatch on the source and destination operand kinds. Not all | |
| 825 // combinations are possible. | |
| 826 if (source->IsRegister()) { | |
| 827 // Register-register. | |
| 828 Register temp = kScratchReg; | |
| 829 Register src = g.ToRegister(source); | |
| 830 if (destination->IsRegister()) { | |
| 831 Register dst = g.ToRegister(destination); | |
| 832 __ Move(temp, src); | |
| 833 __ Move(src, dst); | |
| 834 __ Move(dst, temp); | |
| 835 } else { | |
| 836 DCHECK(destination->IsStackSlot()); | |
| 837 MemOperand dst = g.ToMemOperand(destination); | |
| 838 __ mov(temp, src); | |
| 839 __ lw(src, dst); | |
| 840 __ sw(temp, dst); | |
| 841 } | |
| 842 } else if (source->IsStackSlot()) { | |
| 843 DCHECK(destination->IsStackSlot()); | |
| 844 Register temp_0 = kScratchReg; | |
| 845 Register temp_1 = kCompareReg; | |
| 846 MemOperand src = g.ToMemOperand(source); | |
| 847 MemOperand dst = g.ToMemOperand(destination); | |
| 848 __ lw(temp_0, src); | |
| 849 __ lw(temp_1, dst); | |
| 850 __ sw(temp_0, dst); | |
| 851 __ sw(temp_1, src); | |
| 852 } else if (source->IsDoubleRegister()) { | |
| 853 FPURegister temp = kScratchDoubleReg; | |
| 854 FPURegister src = g.ToDoubleRegister(source); | |
| 855 if (destination->IsDoubleRegister()) { | |
| 856 FPURegister dst = g.ToDoubleRegister(destination); | |
| 857 __ Move(temp, src); | |
| 858 __ Move(src, dst); | |
| 859 __ Move(dst, temp); | |
| 860 } else { | |
| 861 DCHECK(destination->IsDoubleStackSlot()); | |
| 862 MemOperand dst = g.ToMemOperand(destination); | |
| 863 __ Move(temp, src); | |
| 864 __ ldc1(src, dst); | |
| 865 __ sdc1(temp, dst); | |
| 866 } | |
| 867 } else if (source->IsDoubleStackSlot()) { | |
| 868 DCHECK(destination->IsDoubleStackSlot()); | |
| 869 Register temp_0 = kScratchReg; | |
| 870 FPURegister temp_1 = kScratchDoubleReg; | |
| 871 MemOperand src0 = g.ToMemOperand(source); | |
| 872 MemOperand src1(src0.rm(), src0.offset() + kPointerSize); | |
| 873 MemOperand dst0 = g.ToMemOperand(destination); | |
| 874 MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize); | |
| 875 __ ldc1(temp_1, dst0); // Save destination in temp_1. | |
| 876 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. | |
| 877 __ sw(temp_0, dst0); | |
| 878 __ lw(temp_0, src1); | |
| 879 __ sw(temp_0, dst1); | |
| 880 __ sdc1(temp_1, src0); | |
| 881 } else { | |
| 882 // No other combinations are possible. | |
| 883 UNREACHABLE(); | |
| 884 } | |
| 885 } | |
| 886 | |
| 887 | |
| 888 void CodeGenerator::AddNopForSmiCodeInlining() { | |
| 889 // Unused on 32-bit ARM. Still exists on 64-bit ARM. | |
| 890 // TODO(plind): Unclear when this is called now. Understand, fix if needed. | |
| 891 __ nop(); // Maybe PROPERTY_ACCESS_INLINED? | |
| 892 } | |
| 893 | |
| 894 | |
| 895 void CodeGenerator::EnsureSpaceForLazyDeopt() { | |
| 896 int space_needed = Deoptimizer::patch_size(); | |
| 897 if (!linkage()->info()->IsStub()) { | |
| 898 // Ensure that we have enough space after the previous lazy-bailout | |
| 899 // instruction for patching the code here. | |
| 900 int current_pc = masm()->pc_offset(); | |
| 901 if (current_pc < last_lazy_deopt_pc_ + space_needed) { | |
| 902 // Block trampoline pool emission for the duration of the padding. | |
| 903 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( | |
| 904 masm()); | |
| 905 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; | |
| 906 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize); | |
| 907 while (padding_size > 0) { | |
| 908 __ nop(); | |
| 909 padding_size -= v8::internal::Assembler::kInstrSize; | |
| 910 } | |
| 911 } | |
| 912 } | |
| 913 MarkLazyDeoptSite(); | |
| 914 } | |
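A worked example of the padding computation in this function, with made-up numbers (editor's sketch):

```cpp
#include <cassert>

int main() {
  const int kInstrSize = 4;        // MIPS instructions are 4 bytes wide
  int last_lazy_deopt_pc = 40;     // hypothetical previous lazy-deopt site
  int space_needed = 12;           // hypothetical Deoptimizer::patch_size()
  int current_pc = 44;             // too close: 44 < 40 + 12
  int padding_size = last_lazy_deopt_pc + space_needed - current_pc;  // 8
  assert(padding_size % kInstrSize == 0);
  int nops_emitted = 0;
  while (padding_size > 0) {       // mirrors the nop-emitting loop above
    ++nops_emitted;
    padding_size -= kInstrSize;
  }
  assert(nops_emitted == 2);       // two nops close the gap
  return 0;
}
```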
| 915 | |
| 916 #undef __ | |
| 917 | |
| 918 } // namespace compiler | |
| 919 } // namespace internal | |
| 920 } // namespace v8 | |