OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems or the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license has been |
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2012 the V8 project authors. All rights reserved. |
| 36 |
| 37 // |
| 38 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 39 // |
| 40 |
| 41 #include "src/v8.h" |
| 42 |
| 43 #if V8_TARGET_ARCH_PPC |
| 44 |
| 45 #include "src/base/cpu.h" |
| 46 #include "src/ppc/assembler-ppc-inl.h" |
| 47 |
| 48 #include "src/macro-assembler.h" |
| 49 #include "src/serialize.h" |
| 50 |
| 51 namespace v8 { |
| 52 namespace internal { |
| 53 |
// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  // No PPC features are implied purely by the build configuration here.
  return 0;
}
| 59 |
| 60 |
// Probes the host CPU and records the supported feature set and cache line
// size.  For cross compiles (snapshot builds) only statically known
// features are used.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  // Start from the features implied by the build configuration.
  supported_ |= CpuFeaturesImpliedByCompiler();
  // Default PPC cache line size; may be refined from the OS below.
  cache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Detect whether frim instruction is supported (POWER5+)
  // For now we will just check for processors we know do not
  // support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  // POWER8 supports direct moves between FP and GP registers.
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.cache_line_size() != 0) {
    cache_line_size_ = cpu.cache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator implements the full feature set.
  supported_ |= (1u << FPU);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
| 98 |
| 99 |
| 100 void CpuFeatures::PrintTarget() { |
| 101 const char* ppc_arch = NULL; |
| 102 |
| 103 #if V8_TARGET_ARCH_PPC64 |
| 104 ppc_arch = "ppc64"; |
| 105 #else |
| 106 ppc_arch = "ppc"; |
| 107 #endif |
| 108 |
| 109 printf("target %s\n", ppc_arch); |
| 110 } |
| 111 |
| 112 |
// Prints the runtime-detected feature set (currently only FPU).
void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
| 116 |
| 117 |
// Maps a numeric register code (0..kNumRegisters-1) to its Register value.
// Codes 1, 12 and 31 map to the aliases sp, ip and fp respectively.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    r0,
    sp,
    r2, r3, r4, r5, r6, r7, r8, r9, r10,
    r11, ip, r13, r14, r15,
    r16, r17, r18, r19, r20, r21, r22, r23, r24,
    r25, r26, r27, r28, r29, r30, fp
  };
  return kRegisters[num];
}
| 130 |
| 131 |
// Returns the printable name of an allocatable double register.
// NOTE(review): the table skips d0, d13 and d14 — presumably reserved
// (scratch) registers excluded from allocation; confirm against the
// register definitions in assembler-ppc.h.
const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12",
    "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25",
    "d26", "d27", "d28", "d29", "d30", "d31"
  };
  return names[index];
}
| 141 |
| 142 |
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// Only internal references need fixing up when generated code is moved.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is an out of line constant pool entry,
  // and these are always the case inside code objects.
  return true;
}
| 156 |
| 157 |
bool RelocInfo::IsInConstantPool() {
#if V8_OOL_CONSTANT_POOL
  // With an out-of-line constant pool, pc_ is in the pool when it points
  // at the start of a constant-pool load sequence.
  return Assembler::IsConstantPoolLoadStart(pc_);
#else
  // Without an out-of-line constant pool nothing lives in a pool.
  return false;
#endif
}
| 165 |
| 166 |
| 167 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
| 168 // Patch the code at the current address with the supplied instructions. |
| 169 Instr* pc = reinterpret_cast<Instr*>(pc_); |
| 170 Instr* instr = reinterpret_cast<Instr*>(instructions); |
| 171 for (int i = 0; i < instruction_count; i++) { |
| 172 *(pc + i) = *(instr + i); |
| 173 } |
| 174 |
| 175 // Indicate that code has changed. |
| 176 CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); |
| 177 } |
| 178 |
| 179 |
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // NOTE(review): not implemented on PPC — UNIMPLEMENTED aborts if reached.
  UNIMPLEMENTED();
}
| 186 |
| 187 |
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

// Builds an immediate operand from a handle.  Heap objects are recorded as
// EMBEDDED_OBJECT relocations (the handle location is embedded, not the
// object itself); other values (smis) are embedded directly.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}
| 207 |
| 208 |
| 209 MemOperand::MemOperand(Register rn, int32_t offset) { |
| 210 ra_ = rn; |
| 211 rb_ = no_reg; |
| 212 offset_ = offset; |
| 213 } |
| 214 |
| 215 |
| 216 MemOperand::MemOperand(Register ra, Register rb) { |
| 217 ra_ = ra; |
| 218 rb_ = rb; |
| 219 offset_ = 0; |
| 220 } |
| 221 |
| 222 |
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// Spare buffer.
// NOTE(review): appears unreferenced within this file; possibly vestigial —
// confirm before removing.
static const int kMinimalBufferSize = 4*KB;
| 228 |
| 229 |
// Constructs an assembler writing into the given buffer (see AssemblerBase
// for buffer ownership) and initializes trampoline-pool bookkeeping.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
#if V8_OOL_CONSTANT_POOL
      constant_pool_builder_(),
#endif
      positions_recorder_(this) {
  // Relocation info is written backwards from the end of the buffer.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (kMaxBlockTrampolineSectionSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches the trampoline pool is treated as already
  // emitted, so no unbound-label accounting is performed.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;

#if V8_OOL_CONSTANT_POOL
  constant_pool_available_ = false;
#endif

  ClearRecordedAstId();
}
| 257 |
| 258 |
| 259 void Assembler::GetCode(CodeDesc* desc) { |
| 260 // Set up code descriptor. |
| 261 desc->buffer = buffer_; |
| 262 desc->buffer_size = buffer_size_; |
| 263 desc->instr_size = pc_offset(); |
| 264 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 265 desc->origin = this; |
| 266 } |
| 267 |
| 268 |
// Emits nops until the current position is aligned to m bytes
// (m must be a power of two, at least one instruction wide).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Code targets are aligned to 8 bytes.
  Align(8);
}
| 280 |
| 281 |
// Decodes the condition encoded in a conditional-branch instruction.
// Only the BT (branch-if-true => eq) and BF (branch-if-false => ne)
// encodings are handled; anything else hits UNIMPLEMENTED.
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;  // unreachable; placates the compiler
}
| 293 |
| 294 |
// Instruction predicates and field accessors used by the patching code.

// lis is encoded as addis with ra == r0.
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


// li is encoded as addi with ra == r0.
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) {
  return (instr & kOpcodeMask) == ADDIC;
}


bool Assembler::IsOri(Instr instr) {
  return (instr & kOpcodeMask) == ORI;
}


// Conditional branch (bc).
bool Assembler::IsBranch(Instr instr) {
  return ((instr & kOpcodeMask) == BCX);
}


// Extracts the RA register field of an instruction.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RAValue(instr);
  return reg;
}


// Extracts the RB register field of an instruction.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RBValue(instr);
  return reg;
}
| 332 |
| 333 |
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2,
                                   Instr instr3, Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  // Only the opcode/register halves (upper 16 bits) are compared; the low
  // 16 bits carry the loaded constant — except rldicr, a fixed word.
  return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
         (instr3 == 0x798c07c6) &&
         ((instr4 >> 16) == 0x658c) && ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori     r12, r12, 20480
  return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif
| 357 |
| 358 |
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((instr & kExt2OpcodeMask) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((instr & kExt5OpcodeMask) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


// crset bt is encoded as creqv bt, bt, bt.
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((instr & kExt1OpcodeMask) == CREQV));
}


// Returns the register compared by a cmpi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


// Returns the raw (not sign-extended) 16-bit immediate of a cmpi.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
| 399 |
| 400 |
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.  Each use stores (as its branch offset or
// label-constant word) a link to the previous use, forming a chain.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
| 414 |
| 415 |
// Follows one link of a label chain: returns the position encoded at code
// position pos, or kEndOfChain for the terminator (a branch/link to self,
// i.e. an offset of 0).
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  if (BX == opcode) {
    // Unconditional branch: sign-extend the 26-bit offset field.
    int imm26 = ((instr & kImm26Mask) << 6) >> 6;
    imm26 &= ~(kAAMask|kLKMask);  // discard AA|LK bits if present
    if (imm26 == 0)
      return kEndOfChain;
    return pos + imm26;
  } else if (BCX == opcode) {
    // Conditional branch: sign-extend the 16-bit offset field.
    int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 &= ~(kAAMask|kLKMask);  // discard AA|LK bits if present
    if (imm16 == 0)
      return kEndOfChain;
    return pos + imm16;
  } else if ((instr & ~kImm26Mask) == 0) {
    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm26 = SIGN_EXT_IMM26(instr);
      return (imm26 + pos);
    }
  }

  DCHECK(false);
  return -1;
}
| 445 |
| 446 |
// Rewrites the instruction at pos so that it targets target_pos, mirroring
// the three cases decoded by target_at(): b (26-bit), bc (16-bit) and raw
// label-constant words (regexp PushBacktrack).
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  if (BX == opcode) {
    int imm26 = target_pos - pos;
    DCHECK((imm26 & (kAAMask|kLKMask)) == 0);
    // Clear only the offset field; AA and LK bits are preserved.
    instr &= ((~kImm26Mask)|kAAMask|kLKMask);
    DCHECK(is_int26(imm26));
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
    return;
  } else if (BCX == opcode) {
    int imm16 = target_pos - pos;
    DCHECK((imm16 & (kAAMask|kLKMask)) == 0);
    instr &= ((~kImm16Mask)|kAAMask|kLKMask);
    DCHECK(is_int16(imm16));
    instr_at_put(pos, instr | (imm16 & kImm16Mask));
    return;
  } else if ((instr & ~kImm26Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    Register dst = r3;  // we assume r3 for now
    DCHECK(IsNop(instr_at(pos + kInstrSize)));
    // Offset from the code-object pointer to the label's target.
    uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    // Replace the placeholder word and the following nop with a lis/ori
    // pair that materializes the 32-bit value.
    CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                        2,
                        CodePatcher::DONT_FLUSH);
    int target_hi = static_cast<int>(target) >> 16;
    int target_lo = static_cast<int>(target) & 0XFFFF;

    patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
    patcher.masm()->ori(dst, dst, Operand(target_lo));
    return;
  }

  DCHECK(false);
}
| 488 |
| 489 |
| 490 int Assembler::max_reach_from(int pos) { |
| 491 Instr instr = instr_at(pos); |
| 492 int opcode = instr & kOpcodeMask; |
| 493 |
| 494 // check which type of branch this is 16 or 26 bit offset |
| 495 if (BX == opcode) { |
| 496 return 26; |
| 497 } else if (BCX == opcode) { |
| 498 return 16; |
| 499 } else if ((instr & ~kImm26Mask) == 0) { |
| 500 // Emitted label constant, not part of a branch (regexp PushBacktrack). |
| 501 return 26; |
| 502 } |
| 503 |
| 504 DCHECK(false); |
| 505 return 0; |
| 506 } |
| 507 |
| 508 |
// Binds label L to position pos and resolves every use on its link chain.
// Uses whose branch cannot reach pos directly are routed through a single
// lazily allocated trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    // One fewer unbound label: relax trampoline-pool pressure accordingly.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (is_intn(offset, maxReach) == false) {
      // Out of direct reach: point the short branch at a trampoline slot
      // that in turn branches to pos.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
| 540 |
| 541 |
// Binds L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
| 546 |
| 547 |
| 548 |
| 549 void Assembler::next(Label* L) { |
| 550 DCHECK(L->is_linked()); |
| 551 int link = target_at(L->pos()); |
| 552 if (link == kEndOfChain) { |
| 553 L->Unuse(); |
| 554 } else { |
| 555 DCHECK(link >= 0); |
| 556 L->link_to(link); |
| 557 } |
| 558 } |
| 559 |
| 560 |
| 561 bool Assembler::is_near(Label* L, Condition cond) { |
| 562 DCHECK(L->is_bound()); |
| 563 if (L->is_bound() == false) |
| 564 return false; |
| 565 |
| 566 int maxReach = ((cond == al) ? 26 : 16); |
| 567 int offset = L->pos() - pc_offset(); |
| 568 |
| 569 return is_intn(offset, maxReach); |
| 570 } |
| 571 |
| 572 |
// Emits an A-form floating-point instruction: opcode | FRT | FRA | FRB | Rc.
void Assembler::a_form(Instr instr,
                       DoubleRegister frt,
                       DoubleRegister fra,
                       DoubleRegister frb,
                       RCBit r) {
  emit(instr | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | r);
}
| 580 |
| 581 |
// Emits a D-form instruction: opcode | RT | RA | 16-bit immediate.
// signed_disp selects whether val must fit in int16 or uint16; the PrintF
// calls are a debugging aid emitted just before the DCHECK fires.
void Assembler::d_form(Instr instr,
                       Register rt,
                       Register ra,
                       const intptr_t val,
                       bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    DCHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    DCHECK(is_uint16(val));
  }
  emit(instr | rt.code()*B21 | ra.code()*B16 | (kImm16Mask & val));
}
| 602 |
| 603 |
// Emits an X-form instruction: opcode | RS | RA | RB | Rc.
// Note the encoding order: rs goes in bits 21-25, ra in bits 16-20.
void Assembler::x_form(Instr instr,
                       Register ra,
                       Register rs,
                       Register rb,
                       RCBit r) {
  emit(instr | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | r);
}


// Emits an XO-form instruction: opcode | RT | RA | RB | OE | Rc.
void Assembler::xo_form(Instr instr,
                        Register rt,
                        Register ra,
                        Register rb,
                        OEBit o,
                        RCBit r) {
  emit(instr | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | o | r);
}
| 621 |
| 622 |
// Emits an MD-form (64-bit rotate with immediate shift) instruction.
// The 6-bit shift and mask-bit values are split into 5+1 bit fields as
// the encoding requires.
void Assembler::md_form(Instr instr,
                        Register ra,
                        Register rs,
                        int shift,
                        int maskbit,
                        RCBit r) {
  int sh0_4 = shift & 0x1f;       // shift bits 0-4
  int sh5 = (shift >> 5) & 0x1;   // shift bit 5
  int m0_4 = maskbit & 0x1f;      // mask bits 0-4
  int m5 = (maskbit >> 5) & 0x1;  // mask bit 5

  emit(instr | rs.code()*B21 | ra.code()*B16 |
       sh0_4*B11 | m0_4*B6 | m5*B5 | sh5*B1 | r);
}


// Emits an MDS-form (64-bit rotate with register shift amount) instruction.
void Assembler::mds_form(Instr instr,
                         Register ra,
                         Register rs,
                         Register rb,
                         int maskbit,
                         RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code()*B21 | ra.code()*B16 |
       rb.code()*B11 | m0_4*B6 | m5*B5 | r);
}
| 651 |
| 652 |
| 653 // Returns the next free trampoline entry. |
| 654 int32_t Assembler::get_trampoline_entry() { |
| 655 int32_t trampoline_entry = kInvalidSlotPos; |
| 656 |
| 657 if (!internal_trampoline_exception_) { |
| 658 trampoline_entry = trampoline_.take_slot(); |
| 659 |
| 660 if (kInvalidSlotPos == trampoline_entry) { |
| 661 internal_trampoline_exception_ = true; |
| 662 } |
| 663 } |
| 664 return trampoline_entry; |
| 665 } |
| 666 |
| 667 |
// Returns the byte offset (relative to the current position) to encode in
// a branch referencing L, linking this new use into L's chain when L is
// not yet bound.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using branch to self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      target_pos = pc_offset();
      if (!trampoline_emitted_) {
        // First use of a new unbound label: reserve trampoline capacity.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(pc_offset());
  }

  return target_pos - pc_offset();
}
| 691 |
| 692 |
// Branch instructions.


// Branch conditionally to the link register (bclr), e.g. function return.
void Assembler::bclr(BOfield bo, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | BCLRX | lk);
}


// Branch conditionally to the count register (bcctr).
void Assembler::bcctr(BOfield bo, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() {
  bclr(BA, LeaveLK);
}


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() {
  bcctr(BA, LeaveLK);
}
| 718 |
| 719 |
// Conditional branch with a 16-bit signed byte offset.
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  if (lk == SetLK) {
    // Branches that set the link register are calls; record the source
    // position for them.
    positions_recorder()->WriteRecordedPositions();
  }
  DCHECK(is_int16(branch_offset));
  emit(BCX | bo | condition_bit*B16 | (kImm16Mask & branch_offset) | lk);
}


// Unconditional branch with a 26-bit signed byte offset.
void Assembler::b(int branch_offset, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  DCHECK((branch_offset & 3) == 0);  // offsets are instruction aligned
  int imm26 = branch_offset;
  DCHECK(is_int26(imm26));
  // The LK bit is emitted below; the AA (absolute address) bit is never set.
  emit(BX | (imm26 & kImm26Mask) | lk);
}
| 739 |
| 740 |
// Logical operations.  Note the D-form encoding for logical immediates
// takes (rs, ra) order: the source register occupies the RT field.

void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


// Count leading zeros (word); the RB field is unused, passed as r0.
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
| 764 |
| 765 |
// rlwinm: rotate left word immediate then AND with mask (mb..me).
void Assembler::rlwinm(Register ra, Register rs,
                       int sh, int mb, int me, RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc);
}


// rlwnm: rotate left word by register amount then AND with mask.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 |
       mb*B6 | me << 1 | rc);
}


// rlwimi: rotate left word immediate then mask insert.
void Assembler::rlwimi(Register ra, Register rs,
                       int sh, int mb, int me, RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc);
}


// Shift left word immediate (rlwinm pseudo-op).
void Assembler::slwi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31-val.imm_, rc);
}


// Shift right word immediate (rlwinm pseudo-op).
void Assembler::srwi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32-val.imm_, val.imm_, 31, rc);
}


// Clear the low-order val.imm_ bits (rlwinm pseudo-op).
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31-val.imm_, rc);
}


// Clear the high-order val.imm_ bits (rlwinm pseudo-op).
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}


// Shift right algebraic word immediate.
void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | r);
}


void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}


void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}


void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}


// Rotate left word by register amount (rlwnm pseudo-op with full mask).
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


// Rotate right is rotate left by (32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
| 854 |
| 855 |
// subi is an addi pseudo-op with a negated immediate.
void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.imm_)));
}

void Assembler::addc(Register dst, Register src1, Register src2,
                    OEBit o, RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}


// Add to zero extended (addze has no RB operand).
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code()*B21 | src1.code()*B16 | o | r);
}


// sub computes src1 - src2, i.e. subf with its operands swapped.
void Assembler::sub(Register dst, Register src1, Register src2,
                    OEBit o, RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}


void Assembler::subfc(Register dst, Register src1, Register src2,
                      OEBit o, RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}


void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}


void Assembler::add(Register dst, Register src1, Register src2,
                    OEBit o, RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}


// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2,
                      OEBit o, RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2,
                      OEBit o, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
}


// Divide word
void Assembler::divw(Register dst, Register src1, Register src2,
                     OEBit o, RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}


void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}


void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}
| 931 |
| 932 |
// Logical immediates; note the (rs, ra) order in the D-form encoding.
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}


void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}


void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}


// not is a nor with both source operands equal (pseudo-op).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}


void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}


void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}


void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}
| 966 |
| 967 |
// Compare operations.  cmp/cmpi/cmpl/cmpli compare at the native pointer
// width (L bit set on PPC64); the *w* variants always compare 32-bit words
// (L bit clear).

void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16);
}


void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16);
}


void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}


void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}


void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16);
}


void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16);
}


void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}


void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}
| 1054 |
| 1055 |
// Pseudo op - load immediate
// dst <- sign-extended imm16 (addi dst, r0, imm; r0 as RA encodes zero).
void Assembler::li(Register dst, const Operand &imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}


// dst <- imm16 << 16 (addis dst, r0, imm).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
| 1072 |
| 1073 |
// Load byte and zero-extend from [ra + offset16].
void Assembler::lbz(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));  // r0 as the base encodes a literal zero
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}
| 1078 |
| 1079 |
| 1080 void Assembler::lbzx(Register rt, const MemOperand &src) { |
| 1081 Register ra = src.ra(); |
| 1082 Register rb = src.rb(); |
| 1083 DCHECK(!ra.is(r0)); |
| 1084 emit(EXT2 | LBZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); |
| 1085 } |
| 1086 |
| 1087 |
// lbzx with update: ra <- ra + rb after the load.
void Assembler::lbzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Load halfword and zero-extend from [ra + offset16].
void Assembler::lhz(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


// Indexed halfword load: rt <- zero-extended halfword at [ra + rb].
void Assembler::lhzx(Register rt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// lhzx with update of ra.
void Assembler::lhzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Load word from [ra + offset16] (zero-extended on PPC64).
void Assembler::lwz(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}


// lwz with update of ra.
void Assembler::lwzu(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}
| 1128 |
| 1129 |
| 1130 void Assembler::lwzx(Register rt, const MemOperand &src) { |
| 1131 Register ra = src.ra(); |
| 1132 Register rb = src.rb(); |
| 1133 DCHECK(!ra.is(r0)); |
| 1134 emit(EXT2 | LWZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); |
| 1135 } |
| 1136 |
| 1137 |
// lwzx with update of ra.
void Assembler::lwzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Load word algebraic (sign-extending). On PPC64 this is DS-form lwa (the
// LD major opcode with low bits 2), so the offset must be 4-byte aligned;
// on 32-bit targets word loads need no sign extension and lwz is used.
void Assembler::lwa(Register dst, const MemOperand &src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code()*B21 | src.ra().code()*B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
| 1157 |
| 1158 |
// Store byte to [ra + offset16].
void Assembler::stb(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}


// Indexed byte store: byte of rs -> [ra + rb].
void Assembler::stbx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Indexed byte store with update of ra.
void Assembler::stbux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Store halfword to [ra + offset16].
void Assembler::sth(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}


// Indexed halfword store.
void Assembler::sthx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Indexed halfword store with update of ra.
void Assembler::sthux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Store word to [ra + offset16].
void Assembler::stw(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}


// Store word with update: ra <- ra + offset after the store.
void Assembler::stwu(Register dst, const MemOperand &src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
| 1213 |
| 1214 |
| 1215 void Assembler::stwx(Register rs, const MemOperand &src) { |
| 1216 Register ra = src.ra(); |
| 1217 Register rb = src.rb(); |
| 1218 DCHECK(!ra.is(r0)); |
| 1219 emit(EXT2 | STWX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); |
| 1220 } |
| 1221 |
| 1222 |
// Indexed word store with update of ra.
void Assembler::stwux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Sign-extend byte. NOTE(review): the parameter named |rs| is emitted into
// the RA (destination) field and |ra| into the RS (source) field, so the
// effective call order is (dst, src) -- confirm against callers.
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code()*B21 | rs.code()*B16 | rc);
}


// Sign-extend halfword; same swapped parameter naming as extsb.
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code()*B21 | rs.code()*B16 | rc);
}


// rt <- -ra (two's complement negate).
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code()*B21 | ra.code()*B16 | o | r);
}


// dst <- src1 & ~src2.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
| 1249 |
| 1250 |
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
// DS-form load doubleword; offset must fit int16 and be 4-byte aligned
// (the low two bits of the displacement are the DS extended opcode).
void Assembler::ld(Register rd, const MemOperand &src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset);
}
| 1260 |
| 1261 |
| 1262 void Assembler::ldx(Register rd, const MemOperand &src) { |
| 1263 Register ra = src.ra(); |
| 1264 Register rb = src.rb(); |
| 1265 DCHECK(!ra.is(r0)); |
| 1266 emit(EXT2 | LDX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11); |
| 1267 } |
| 1268 |
| 1269 |
// ld with update of ra (DS-form, extended opcode 1).
void Assembler::ldu(Register rd, const MemOperand &src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset | 1);
}


// Indexed doubleword load with update of ra.
void Assembler::ldux(Register rd, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11);
}


// DS-form store doubleword; offset must be 4-byte aligned.
void Assembler::std(Register rs, const MemOperand &src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset);
}


// Indexed doubleword store.
void Assembler::stdx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11);
}


// std with update of ra (DS-form, extended opcode 1).
void Assembler::stdu(Register rs, const MemOperand &src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset | 1);
}


// Indexed doubleword store with update of ra.
void Assembler::stdux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11);
}
| 1319 |
| 1320 |
// Rotate left doubleword immediate then clear (MD-form).
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


// Rotate left doubleword immediate then clear left.
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


// Rotate left doubleword by register then clear left (MDS-form).
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


// Rotate left doubleword immediate then clear right.
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


// Pseudo op: shift left doubleword by 0..63 = rldicr(sh, 63 - sh).
void Assembler::sldi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63-val.imm_, rc);
}


// Pseudo op: logical shift right doubleword = rldicl(64 - sh, sh).
void Assembler::srdi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64-val.imm_, val.imm_, rc);
}


// Pseudo op: clear the low |val| bits = rldicr(0, 63 - val).
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63-val.imm_, rc);
}


// Pseudo op: clear the high |val| bits = rldicl(0, val).
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}


// Rotate left doubleword immediate then mask insert.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
| 1372 |
| 1373 |
// Shift right algebraic doubleword immediate. The 6-bit shift amount is
// split across the encoding: sh0-4 at B11 and sh5 at B1 (XS-form).
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code()*B21 | ra.code()*B16 | sh0_4*B11 | sh5*B1 | r);
}
| 1380 |
| 1381 |
// Logical shift right doubleword by register amount.
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}


// Shift left doubleword by register amount.
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}


// Arithmetic (sign-propagating) shift right doubleword by register amount.
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}


// Pseudo op: rotate left doubleword by register = rldcl with mb = 0.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


// Pseudo op: rotate left doubleword immediate.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


// Pseudo op: rotate right doubleword immediate = rotate left by 64 - sh.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


// Count leading zeros doubleword; r0 merely fills the unused RB field.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}


// Sign-extend word; same swapped parameter naming as extsb/extsh.
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSW | ra.code()*B21 | rs.code()*B16 | rc);
}


// dst <- low 64 bits of src1 * src2.
void Assembler::mulld(Register dst, Register src1, Register src2,
                      OEBit o, RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


// dst <- src1 / src2, signed doubleword divide.
void Assembler::divd(Register dst, Register src1, Register src2,
                     OEBit o, RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
| 1432 #endif |
| 1433 |
| 1434 |
// Emits a fake (marker) opcode; fopcode must be one of the faker
// sub-opcodes below fLastFaker.
void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
  DCHECK(fopcode < fLastFaker);
  emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
}
| 1439 |
| 1440 |
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
  DCHECK(pc_offset() == 0);  // the descriptor must be the first emission
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  // Entry point: the address immediately after these three pointer slots.
  emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
  emit_ptr(0);  // TOC
  emit_ptr(0);  // static chain (environment)
}
| 1451 |
| 1452 |
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
// Fixes up an internal reference at |pc| after code moved by |delta|
// (or rebases it onto |code_start| when delta is 0). Handles both a
// function descriptor (recognized by its two zero trailing slots) and
// the out-of-line constant pool pointer load.
void Assembler::RelocateInternalReference(Address pc,
                                          intptr_t delta,
                                          Address code_start,
                                          ICacheFlushMode icache_flush_mode) {
  DCHECK(delta || code_start);  // exactly the inputs needed to relocate
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t *fd = reinterpret_cast<uintptr_t*>(pc);
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    if (delta) {
      fd[0] += delta;
    } else {
      fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
    }
    return;
  }
#endif
#if V8_OOL_CONSTANT_POOL
  // mov for LoadConstantPoolPointerRegister
  ConstantPoolArray *constant_pool = NULL;
  if (delta) {
    code_start = target_address_at(pc, constant_pool) + delta;
  }
  set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
#endif
}


// Pretty-prints a function descriptor at |pc| into |buffer|; returns the
// number of bytes it occupies, or 0 if |pc| is not a descriptor.
int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t *fd = reinterpret_cast<uintptr_t*>(pc);
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    SNPrintF(buffer,
             "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR "]"
             " function descriptor",
             fd[0], fd[1], fd[2]);
    return kPointerSize * 3;
  }
#endif
  return 0;
}
#endif
| 1497 |
| 1498 |
// Returns the number of instructions a mov(reg, x) will occupy so callers
// can reserve space for a fixed-length sequence.
int Assembler::instructions_required_for_mov(const Operand& x) const {
#if V8_OOL_CONSTANT_POOL || DEBUG
  bool canOptimize = !(x.must_output_reloc_info(this) ||
                       is_trampoline_pool_blocked());
#endif
#if V8_OOL_CONSTANT_POOL
  if (use_constant_pool_for_mov(x, canOptimize)) {
    // Current usage guarantees that all constant pool references can
    // use the same sequence.
    return kMovInstructionsConstantPool;
  }
#endif
  // Callers are expected to ask only when mov() cannot use a shortened
  // (variable-length) sequence.
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
| 1514 |
| 1515 |
#if V8_OOL_CONSTANT_POOL
// Decides whether mov(reg, x) should load |x| from the out-of-line
// constant pool instead of materializing it inline.
bool Assembler::use_constant_pool_for_mov(const Operand& x,
                                          bool canOptimize) const {
  if (!is_constant_pool_available() || is_constant_pool_full()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = x.immediate();
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }

  return true;
}


// Grows the buffer if fewer than |space_needed| + kGap bytes remain.
void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer();
  }
}
#endif
| 1541 |
| 1542 |
| 1543 bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
| 1544 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { |
| 1545 if (assembler != NULL && assembler->predictable_code_size()) return true; |
| 1546 return assembler->serializer_enabled(); |
| 1547 } else if (RelocInfo::IsNone(rmode_)) { |
| 1548 return false; |
| 1549 } |
| 1550 return true; |
| 1551 } |
| 1552 |
| 1553 |
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool canOptimize;
  RelocInfo rinfo(pc_, src.rmode_, value, NULL);

  if (src.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  // Shortened sequences are only safe when the value needs no relocation
  // and the trampoline pool cannot be emitted mid-sequence.
  canOptimize = !(src.must_output_reloc_info(this) ||
                  is_trampoline_pool_blocked());

#if V8_OOL_CONSTANT_POOL
  if (use_constant_pool_for_mov(src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    ConstantPoolAddEntry(rinfo);
#if V8_TARGET_ARCH_PPC64
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // We are forced to use 2 instruction sequence since the constant
    // pool pointer is tagged.
    li(dst, Operand::Zero());
    ldx(dst, MemOperand(kConstantPoolRegister, dst));
#else
    lwz(dst, MemOperand(kConstantPoolRegister, 0));
#endif
    return;
  }
#endif

  // Variable-length sequence: emit only the pieces the value needs
  // (li for int16, lis/ori for int32, up to 5 instructions for int64).
  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);

  // Fixed-length fallback: always emit the full lis/ori[/sldi/oris/ori]
  // sequence so the instruction count is predictable (patchable later).
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
    int32_t hi_32 = static_cast<int32_t>(value >> 32);
    int32_t lo_32 = static_cast<int32_t>(value);
    int hi_word = static_cast<int>(hi_32 >> 16);
    int lo_word = static_cast<int>(hi_32 & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
    sldi(dst, dst, Operand(32));
    hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
    lo_word = static_cast<int>(lo_32 & 0xffff);
    oris(dst, dst, Operand(hi_word));
    ori(dst, dst, Operand(lo_word));
#else
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
#endif
  }
}
| 1648 |
| 1649 |
// Loads dst with |label|'s offset from the code start (plus the Code
// header). For an unbound label, a link word and a nop placeholder are
// emitted and later patched by target_at_put.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    int target = label->pos();
    mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
  } else {
    bool is_linked = label->is_linked();
    // Emit the link to the label in the code stream followed by extra
    // nop instructions.
    DCHECK(dst.is(r3));  // target_at_put assumes r3 for now
    int link = is_linked ? label->pos() - pc_offset(): 0;
    label->link_to(pc_offset());

    if (!is_linked && !trampoline_emitted_) {
      unbound_labels_count_++;
      next_buffer_check_ -= kTrampolineSlotsSize;
    }

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // When the label gets bound: target_at extracts the link and
    // target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(link);
    nop();
  }
}
| 1679 |
| 1680 |
// Special register instructions
// CR-bit XOR: CR[bt] <- CR[ba] ^ CR[bb] (crxor b,b,b clears bit b).
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt*B21 | ba*B16 | bb*B11);
}


// CR-bit equivalence: CR[bt] <- ~(CR[ba] ^ CR[bb]) (creqv b,b,b sets b).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt*B21 | ba*B16 | bb*B11);
}


// The SPR number is encoded with its 5-bit halves swapped, so 256 denotes
// LR, 288 denotes CTR and 32 denotes XER (see Power ISA mfspr/mtspr).
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code()*B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 288 << 11);  // Ignore RC bit
}


void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 32 << 11);
}


// Copy FPSCR field |bfa| into CR field |bf|.
void Assembler::mcrfs(int bf, int bfa) {
  emit(EXT4 | MCRFS | bf*B23 | bfa*B18);
}


// Move the entire condition register into dst.
void Assembler::mfcr(Register dst) {
  emit(EXT2 | MFCR | dst.code()*B21);
}
| 1720 |
| 1721 |
#if V8_TARGET_ARCH_PPC64
// Direct GPR<->FPR moves. mffprd/mtfprd move the full 64 bits; the wz/wa
// variants move a 32-bit word zero-extended / sign-extended respectively.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code()*B21 | dst.code()*B16);
}


void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code()*B21 | dst.code()*B16);
}


void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code()*B21 | src.code()*B16);
}


void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code()*B21 | src.code()*B16);
}


void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code()*B21 | src.code()*B16);
}
#endif
| 1747 |
| 1748 |
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
// NOTE(review): |msg| and |code| are unused on this hardware path; they
// appear to be consumed only by the simulator -- confirm.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    // Conditional stop: branch around the trap when |cond| is false.
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}


// Emits a fixed trap encoding (0x7d821008); |imm16| is ignored here.
void Assembler::bkpt(uint32_t imm16) {
  emit(0x7d821008);
}
| 1768 |
| 1769 |
// Data cache block flush at effective address ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code()*B16 | rb.code()*B11);
}


// Full memory barrier.
void Assembler::sync() {
  emit(EXT2 | SYNC);
}


// Instruction cache block invalidate at ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code()*B16 | rb.code()*B11);
}


// Instruction-fetch barrier; discards prefetched instructions.
void Assembler::isync() {
  emit(EXT1 | ISYNC);
}
| 1788 |
| 1789 |
| 1790 // Floating point support |
| 1791 |
| 1792 void Assembler::lfd(const DoubleRegister frt, const MemOperand &src) { |
| 1793 int offset = src.offset(); |
| 1794 Register ra = src.ra(); |
| 1795 DCHECK(is_int16(offset)); |
| 1796 int imm16 = offset & kImm16Mask; |
| 1797 // could be x_form instruction with some casting magic |
| 1798 emit(LFD | frt.code()*B21 | ra.code()*B16 | imm16); |
| 1799 } |
| 1800 |
| 1801 |
| 1802 void Assembler::lfdu(const DoubleRegister frt, const MemOperand &src) { |
| 1803 int offset = src.offset(); |
| 1804 Register ra = src.ra(); |
| 1805 DCHECK(is_int16(offset)); |
| 1806 int imm16 = offset & kImm16Mask; |
| 1807 // could be x_form instruction with some casting magic |
| 1808 emit(LFDU | frt.code()*B21 | ra.code()*B16 | imm16); |
| 1809 } |
| 1810 |
| 1811 |
| 1812 void Assembler::lfdx(const DoubleRegister frt, const MemOperand &src) { |
| 1813 Register ra = src.ra(); |
| 1814 Register rb = src.rb(); |
| 1815 DCHECK(!ra.is(r0)); |
| 1816 emit(EXT2 | LFDX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); |
| 1817 } |
| 1818 |
| 1819 |
// Indexed double load with update of ra.
void Assembler::lfdux(const DoubleRegister frt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Load single-precision float from [ra + offset16] into frt.
void Assembler::lfs(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code()*B21 | ra.code()*B16 | imm16);
}


// lfs with update of ra.
void Assembler::lfsu(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code()*B21 | ra.code()*B16 | imm16);
}


// Indexed single-precision load.
void Assembler::lfsx(const DoubleRegister frt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}


// Indexed single-precision load with update of ra.
void Assembler::lfsux(const DoubleRegister frt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1864 |
| 1865 |
// Store double frs to [ra + offset16] (D-form).
void Assembler::stfd(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code()*B21 | ra.code()*B16 | imm16);
}


// stfd with update of ra.
void Assembler::stfdu(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code()*B21 | ra.code()*B16 | imm16);
}
| 1886 |
| 1887 |
| 1888 void Assembler::stfdx(const DoubleRegister frs, const MemOperand &src) { |
| 1889 Register ra = src.ra(); |
| 1890 Register rb = src.rb(); |
| 1891 DCHECK(!ra.is(r0)); |
| 1892 emit(EXT2 | STFDX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); |
| 1893 } |
| 1894 |
| 1895 |
// Indexed double store with update of ra.
void Assembler::stfdux(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}


// Store frs as single precision at [ra + offset16].
void Assembler::stfs(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code()*B21 | ra.code()*B16 | imm16);
}


// stfs with update of ra.
void Assembler::stfsu(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code()*B21 | ra.code()*B16 | imm16);
}


// Indexed single-precision store.
void Assembler::stfsx(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}


// Indexed single-precision store with update of ra.
void Assembler::stfsux(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}
| 1940 |
| 1941 |
// frt <- fra - frb (A-form).
void Assembler::fsub(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


// frt <- fra + frb (A-form).
void Assembler::fadd(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


// frt <- fra * frc; the multiplier occupies the FRC field at B6.
void Assembler::fmul(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frc,
                     RCBit rc) {
  emit(EXT4 | FMUL | frt.code()*B21 | fra.code()*B16 | frc.code()*B6 | rc);
}


// frt <- fra / frb (A-form).
void Assembler::fdiv(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


// Unordered floating compare of fra with frb into CR field |cr|.
void Assembler::fcmpu(const DoubleRegister fra,
                      const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code()*B23 | fra.code()*B16 | frb.code()*B11);
}


// Copy frb into frt.
void Assembler::fmr(const DoubleRegister frt,
                    const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code()*B21 | frb.code()*B11 | rc);
}


// Convert frb to an integer word, rounding toward zero.
void Assembler::fctiwz(const DoubleRegister frt,
                       const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code()*B21 | frb.code()*B11);
}


// Convert frb to an integer word using the current rounding mode.
void Assembler::fctiw(const DoubleRegister frt,
                      const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code()*B21 | frb.code()*B11);
}


// Round frb to an integer toward minus infinity (floor).
void Assembler::frim(const DoubleRegister frt,
                     const DoubleRegister frb) {
  emit(EXT4 | FRIM | frt.code()*B21 | frb.code()*B11);
}


// Round frb to single precision.
void Assembler::frsp(const DoubleRegister frt,
                     const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code()*B21 | frb.code()*B11 | rc);
}
| 2012 |
| 2013 |
| 2014 void Assembler::fcfid(const DoubleRegister frt, |
| 2015 const DoubleRegister frb, |
| 2016 RCBit rc) { |
| 2017 emit(EXT4 | FCFID | frt.code()*B21 | frb.code()*B11 | rc); |
| 2018 } |
| 2019 |
| 2020 |
| 2021 void Assembler::fctid(const DoubleRegister frt, |
| 2022 const DoubleRegister frb, |
| 2023 RCBit rc) { |
| 2024 emit(EXT4 | FCTID | frt.code()*B21 | frb.code()*B11 | rc); |
| 2025 } |
| 2026 |
| 2027 |
| 2028 void Assembler::fctidz(const DoubleRegister frt, |
| 2029 const DoubleRegister frb, |
| 2030 RCBit rc) { |
| 2031 emit(EXT4 | FCTIDZ | frt.code()*B21 | frb.code()*B11 | rc); |
| 2032 } |
| 2033 |
| 2034 |
| 2035 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 2036 const DoubleRegister frc, const DoubleRegister frb, |
| 2037 RCBit rc) { |
| 2038 emit(EXT4 | FSEL | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2039 frc.code()*B6 | rc); |
| 2040 } |
| 2041 |
| 2042 |
| 2043 void Assembler::fneg(const DoubleRegister frt, |
| 2044 const DoubleRegister frb, |
| 2045 RCBit rc) { |
| 2046 emit(EXT4 | FNEG | frt.code()*B21 | frb.code()*B11 | rc); |
| 2047 } |
| 2048 |
| 2049 |
| 2050 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { |
| 2051 emit(EXT4 | MTFSFI | bf*B23 | immediate*B12 | rc); |
| 2052 } |
| 2053 |
| 2054 |
| 2055 void Assembler::mffs(const DoubleRegister frt, RCBit rc) { |
| 2056 emit(EXT4 | MFFS | frt.code()*B21 | rc); |
| 2057 } |
| 2058 |
| 2059 |
// mtfsf: Move To FPSCR Fields. Writes the FPSCR fields selected by the
// FLM mask from frb. L and W are the extended-form control bits
// (presumably the ISA 2.05 mtfsf extension -- confirm against the ISA
// manual); rc requests a CR1 update.
void Assembler::mtfsf(const DoubleRegister frb, bool L,
                      int FLM, bool W, RCBit rc) {
  emit(EXT4 | MTFSF | frb.code()*B11 | W*B16 | FLM*B17 | L*B25 | rc);
}
| 2064 |
| 2065 |
| 2066 void Assembler::fsqrt(const DoubleRegister frt, |
| 2067 const DoubleRegister frb, |
| 2068 RCBit rc) { |
| 2069 emit(EXT4 | FSQRT | frt.code()*B21 | frb.code()*B11 | rc); |
| 2070 } |
| 2071 |
| 2072 |
| 2073 void Assembler::fabs(const DoubleRegister frt, |
| 2074 const DoubleRegister frb, |
| 2075 RCBit rc) { |
| 2076 emit(EXT4 | FABS | frt.code()*B21 | frb.code()*B11 | rc); |
| 2077 } |
| 2078 |
| 2079 |
| 2080 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 2081 const DoubleRegister frc, const DoubleRegister frb, |
| 2082 RCBit rc) { |
| 2083 emit(EXT4 | FMADD | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2084 frc.code()*B6 | rc); |
| 2085 } |
| 2086 |
| 2087 |
| 2088 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 2089 const DoubleRegister frc, const DoubleRegister frb, |
| 2090 RCBit rc) { |
| 2091 emit(EXT4 | FMSUB | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2092 frc.code()*B6 | rc); |
| 2093 } |
| 2094 |
| 2095 |
| 2096 // Pseudo instructions. |
| 2097 void Assembler::nop(int type) { |
| 2098 switch (type) { |
| 2099 case 0: |
| 2100 ori(r0, r0, Operand::Zero()); |
| 2101 break; |
| 2102 case DEBUG_BREAK_NOP: |
| 2103 ori(r3, r3, Operand::Zero()); |
| 2104 break; |
| 2105 default: |
| 2106 UNIMPLEMENTED(); |
| 2107 } |
| 2108 } |
| 2109 |
| 2110 |
| 2111 bool Assembler::IsNop(Instr instr, int type) { |
| 2112 DCHECK((0 == type) || (DEBUG_BREAK_NOP == type)); |
| 2113 int reg = 0; |
| 2114 if (DEBUG_BREAK_NOP == type) { |
| 2115 reg = 3; |
| 2116 } |
| 2117 return instr == (ORI | reg*B21 | reg*B16); |
| 2118 } |
| 2119 |
| 2120 |
| 2121 // Debugging. |
// Records a JS_RETURN reloc entry at the current pc. Pending source
// positions are flushed first so they are attributed before the return.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();  // Ensure room; too late to grow inside RecordRelocInfo.
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
| 2127 |
| 2128 |
// Records a DEBUG_BREAK_SLOT reloc entry at the current pc. Pending
// source positions are flushed first, mirroring RecordJSReturn.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();  // Ensure room; too late to grow inside RecordRelocInfo.
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
| 2134 |
| 2135 |
| 2136 void Assembler::RecordComment(const char* msg) { |
| 2137 if (FLAG_code_comments) { |
| 2138 CheckBuffer(); |
| 2139 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
| 2140 } |
| 2141 } |
| 2142 |
| 2143 |
// Grows the code buffer. Instructions occupy the front of the buffer
// and reloc info grows downward from the back, so both regions are
// copied into the new buffer and all cached pointers are rebased.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: start at 4KB, double while below 1MB, then
  // grow linearly by 1MB to bound slack on large buffers.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. pc_delta rebases the front (instructions); rc_delta
  // rebases the back (reloc info stays anchored to the buffer's end).
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
    (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
    }
  }
#if V8_OOL_CONSTANT_POOL
  // Pending constant-pool entries hold pcs into the old buffer; rebase.
  constant_pool_builder_.Relocate(pc_delta);
#endif
#endif
}
| 2197 |
| 2198 |
| 2199 void Assembler::db(uint8_t data) { |
| 2200 CheckBuffer(); |
| 2201 *reinterpret_cast<uint8_t*>(pc_) = data; |
| 2202 pc_ += sizeof(uint8_t); |
| 2203 } |
| 2204 |
| 2205 |
| 2206 void Assembler::dd(uint32_t data) { |
| 2207 CheckBuffer(); |
| 2208 *reinterpret_cast<uint32_t*>(pc_) = data; |
| 2209 pc_ += sizeof(uint32_t); |
| 2210 } |
| 2211 |
| 2212 |
| 2213 void Assembler::emit_ptr(uintptr_t data) { |
| 2214 CheckBuffer(); |
| 2215 *reinterpret_cast<uintptr_t*>(pc_) = data; |
| 2216 pc_ += sizeof(uintptr_t); |
| 2217 } |
| 2218 |
| 2219 |
| 2220 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2221 RelocInfo rinfo(pc_, rmode, data, NULL); |
| 2222 RecordRelocInfo(rinfo); |
| 2223 } |
| 2224 |
| 2225 |
// Writes rinfo into the reloc-info stream, filtering out entries that
// need not (or must not) be recorded. The caller has already ensured
// buffer space (see the DCHECK below).
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
  if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
      rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode())
           || RelocInfo::IsJSReturn(rinfo.rmode())
           || RelocInfo::IsComment(rinfo.rmode())
           || RelocInfo::IsPosition(rinfo.rmode()));
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
      if (!serializer_enabled() && !emit_debug_code()) {
        return;
      }
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
      // Attach the currently recorded AST id to this code target, then
      // clear it so a later entry cannot accidentally reuse it.
      RelocInfo reloc_info_with_ast_id(rinfo.pc(),
                                       rinfo.rmode(),
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
| 2255 |
| 2256 |
| 2257 void Assembler::BlockTrampolinePoolFor(int instructions) { |
| 2258 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
| 2259 } |
| 2260 |
| 2261 |
// Emits the trampoline pool (a run of branch slots for unbound labels)
// if emission is not currently blocked. The pool is emitted at most
// once; after that next_buffer_check_ is pushed to kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump, then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Branch over the pool so fall-through execution skips it.
      b(&after_pool);

      // One branch slot per unbound label; slots are patched when the
      // labels are eventually bound (via trampoline_).
      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        b(&after_pool);
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
      kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
  }
  return;
}
| 2308 |
| 2309 |
// Allocates the ConstantPoolArray for the code being assembled. With
// out-of-line constant pool support this defers to the builder;
// otherwise it returns the canonical empty array.
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
#if V8_OOL_CONSTANT_POOL
  return constant_pool_builder_.New(isolate);
#else
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
#endif
}
| 2319 |
| 2320 |
// Fills constant_pool with the values collected during assembly (a
// no-op unless out-of-line constant pool support is compiled in).
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
#if V8_OOL_CONSTANT_POOL
  constant_pool_builder_.Populate(this, constant_pool);
#else
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
#endif
}
| 2329 |
| 2330 |
| 2331 #if V8_OOL_CONSTANT_POOL |
// Starts with an empty pool. Entries are placed in the small section
// until its size limit is reached (see AddEntry).
ConstantPoolBuilder::ConstantPoolBuilder()
    : size_(0),
      entries_(),
      current_section_(ConstantPoolArray::SMALL_SECTION) {
}
| 2337 |
| 2338 |
| 2339 bool ConstantPoolBuilder::IsEmpty() { |
| 2340 return entries_.size() == 0; |
| 2341 } |
| 2342 |
| 2343 |
// Maps a reloc mode to the constant pool entry type it requires.
// NOTE: the if/else-if chain is deliberately spliced across the #if
// branches below -- each preprocessor selection yields a balanced chain,
// even though neither branch looks balanced on its own.
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
    RelocInfo::Mode rmode) {
#if V8_TARGET_ARCH_PPC64
  // We don't support 32-bit entries at this time.
  if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT64;
#else
  if (rmode == RelocInfo::NONE64) {
    return ConstantPoolArray::INT64;
  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT32;
#endif
  } else if (RelocInfo::IsCodeTarget(rmode)) {
    return ConstantPoolArray::CODE_PTR;
  } else {
    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
    return ConstantPoolArray::HEAP_PTR;
  }
}
| 2363 |
| 2364 |
// Adds rinfo to the pending pool, merging it with an identical earlier
// entry when safe, and returns the layout section the entry landed in.
// Also maintains size_ and flips to the extended section once the small
// section's header fields can no longer describe the pool.
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
    Assembler* assm, const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  DCHECK(rmode != RelocInfo::COMMENT &&
         rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  ConstantPoolArray::LayoutSection entry_section = current_section_;
  if (RelocInfo::IsNone(rmode) ||
      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
    size_t i;
    std::vector<ConstantPoolEntry>::const_iterator it;
    // Linear scan for an identical existing entry; the merged entry
    // inherits that entry's section.
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
        // Merge with found entry.
        merged_index = i;
        entry_section = entries_[i].section_;
        break;
      }
    }
  }
  DCHECK(entry_section <= current_section_);
  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
  }

  // Check if we still have room for another entry in the small section
  // given the limitations of the header's layout fields.
  if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
    size_ = ConstantPoolArray::SizeFor(*small_entries());
    if (!is_uint12(size_)) {
      // Small-section size field is 12 bits; overflow -> extended layout.
      current_section_ = ConstantPoolArray::EXTENDED_SECTION;
    }
  } else {
    size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
                                               *extended_entries());
  }

  return entry_section;
}
| 2411 |
| 2412 |
| 2413 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) { |
| 2414 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2415 entry != entries_.end(); entry++) { |
| 2416 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); |
| 2417 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); |
| 2418 } |
| 2419 } |
| 2420 |
| 2421 |
// Allocates a ConstantPoolArray sized for the collected entries:
// the shared empty array when nothing was added, a small-layout array
// when everything fit, otherwise an extended-layout array.
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
  if (IsEmpty()) {
    return isolate->factory()->empty_constant_pool_array();
  } else if (extended_entries()->is_empty()) {
    return isolate->factory()->NewConstantPoolArray(*small_entries());
  } else {
    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
    return isolate->factory()->NewExtendedConstantPoolArray(
        *small_entries(), *extended_entries());
  }
}
| 2433 |
| 2434 |
// Writes the collected constants into constant_pool and patches each
// recorded load instruction with its entry's offset. Merged entries
// reuse the offset stashed by the entry they merged with.
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  DCHECK_EQ(extended_entries()->is_empty(),
            !constant_pool->is_extended_layout());
  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
      constant_pool, ConstantPoolArray::SMALL_SECTION)));
  if (constant_pool->is_extended_layout()) {
    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
  }

  // Set up initial offsets: one running cursor per (section, type) pair.
  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
             [ConstantPoolArray::NUMBER_OF_TYPES];
  for (int section = 0; section <= constant_pool->final_section(); section++) {
    // Extended-section entries are laid out after all small entries.
    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
                            ? small_entries()->total_count()
                            : 0;
    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
      if (number_of_entries_[section].count_of(type) != 0) {
        offsets[section][type] = constant_pool->OffsetOfElementAt(
            number_of_entries_[section].base_of(type) + section_start);
      }
    }
  }

  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    RelocInfo rinfo = entry->rinfo_;
    RelocInfo::Mode rmode = entry->rinfo_.rmode();
    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

    // Update constant pool if necessary and get the entry's offset.
    // NOTE: the if/else-if chain below is deliberately spliced across
    // the #if branches; each preprocessor selection is balanced.
    int offset;
    if (entry->merged_index_ == -1) {
      offset = offsets[entry->section_][type];
      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
      if (type == ConstantPoolArray::INT64) {
#if V8_TARGET_ARCH_PPC64
        constant_pool->set_at_offset(offset, rinfo.data());
#else
        constant_pool->set_at_offset(offset, rinfo.data64());
      } else if (type == ConstantPoolArray::INT32) {
        constant_pool->set_at_offset(offset,
                                     static_cast<int32_t>(rinfo.data()));
#endif
      } else if (type == ConstantPoolArray::CODE_PTR) {
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Address>(rinfo.data()));
      } else {
        DCHECK(type == ConstantPoolArray::HEAP_PTR);
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Object*>(rinfo.data()));
      }
      // Bias by kHeapObjectTag -- presumably to match how the load
      // instruction addresses the (tagged) pool; confirm against
      // SetConstantPoolOffset.
      offset -= kHeapObjectTag;
      entry->merged_index_ = offset;  // Stash offset for merged entries.
    } else {
      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
      offset = entries_[entry->merged_index_].merged_index_;
    }

    // Patch load instruction with correct offset.
    Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
  }
}
| 2501 #endif |
| 2502 |
| 2503 |
| 2504 } } // namespace v8::internal |
| 2505 |
| 2506 #endif // V8_TARGET_ARCH_PPC |
OLD | NEW |