// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)


// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator FINAL : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
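      // Shift instructions encode the shift amount in a 5-bit field.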
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        return is_uint5(value);
      case kMipsXor:
        return is_uint16(value);
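      // Double loads and stores may be expanded into two word accesses, so
      // the offset of the second word (value + kIntSize) must also fit.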
      case kMipsLdc1:
      case kMipsSdc1:
        return is_int16(value + kIntSize);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};


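// Emits |opcode| with a register output and two register inputs.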
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


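// Emits |opcode| with a register output, a register input, and a second
// input that may be an immediate when the opcode allows it.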
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  MipsOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}


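// Shared routine for multiple binary operations.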
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kMipsLwc1;
      break;
    case kRepFloat64:
      opcode = kMipsLdc1;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
      break;
    case kRepWord16:
      opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kMipsLw;
      break;
    default:
      UNREACHABLE();
      return;
  }

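  // Use base + immediate-offset addressing when the index is an immediate
  // that fits; otherwise materialize the address in a temporary register.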
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand* addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}


void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
    Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kMipsSwc1;
      break;
    case kRepFloat64:
      opcode = kMipsSdc1;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kMipsSb;
      break;
    case kRepWord16:
      opcode = kMipsSh;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kMipsSw;
      break;
    default:
      UNREACHABLE();
      return;
  }

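  // As with loads, fold the index into the addressing mode when it fits in
  // an immediate; otherwise compute base + index into a temporary first.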
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    InstructionOperand* addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
         g.TempImmediate(0), g.UseRegister(value));
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kMipsAnd);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitBinop(this, node, kMipsXor);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kMipsShl, node);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kMipsShr, node);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kMipsSar, node);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMipsRor, node);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);

  // TODO(plind): Consider multiply & add optimization from arm port.
  VisitBinop(this, node, kMipsAdd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
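  // Strength-reduce multiplication by a positive constant: x * 2^k becomes a
  // shift, and x * (2^k +/- 1) becomes a shift combined with an add or sub.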
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      InstructionOperand* temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand* temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsInt32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kMipsFloat64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRR(this, kMipsFloat64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsFloat64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsFloat64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64Mod, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  MipsOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, false);

  // TODO(dcarney): might be possible to use claim/poke instead
  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    // TODO(plind): inefficient for MIPS, use MultiPush here.
    // - Also need to align the stack. See arm64.
    // - Maybe combine with arg slot stuff in DirectCEntry stub.
    Emit(kMipsPush, NULL, g.UseRegister(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kMipsAddOvf, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kMipsSubOvf, cont);
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    // TODO(plind): Revisit and test this path.
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And:
      // TODO(plind): understand the significance of 'IR and' special case.
      return VisitWordCompare(this, node, kMipsTst, cont, true);
    default:
      break;
  }

  MipsOperandGenerator g(this);
  // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
  // in a dedicated tmp register.
  VisitCompare(this, kMipsTst, g.UseRegister(node), g.UseRegister(node), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kMipsCmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kMipsFloat64Cmp, g.UseRegister(left), g.UseRegister(right),
               cont);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8