OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems nor the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license has been |
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2014 the V8 project authors. All rights reserved. |
| 36 |
| 37 #include "src/v8.h" |
| 38 |
| 39 #if V8_TARGET_ARCH_PPC |
| 40 |
| 41 #include "src/base/bits.h" |
| 42 #include "src/base/cpu.h" |
| 43 #include "src/macro-assembler.h" |
| 44 #include "src/ppc/assembler-ppc-inl.h" |
| 45 #include "src/serialize.h" |
| 46 |
| 47 namespace v8 { |
| 48 namespace internal { |
| 49 |
| 50 // Get the CPU features enabled by the build. |
| 51 static unsigned CpuFeaturesImpliedByCompiler() { |
| 52 unsigned answer = 0; |
| 53 return answer; |
| 54 } |
| 55 |
| 56 |
| 57 void CpuFeatures::ProbeImpl(bool cross_compile) { |
| 58 supported_ |= CpuFeaturesImpliedByCompiler(); |
| 59 cache_line_size_ = 128; |
| 60 |
| 61 // Only use statically determined features for cross compile (snapshot). |
| 62 if (cross_compile) return; |
| 63 |
| 64 // Detect whether the frim instruction is supported (POWER5+). |
| 65 // For now we just check for processors we know do not |
| 66 // support it. |
| 67 #ifndef USE_SIMULATOR |
| 68 // Probe for additional features at runtime. |
| 69 base::CPU cpu; |
| 70 #if V8_TARGET_ARCH_PPC64 |
| 71 if (cpu.part() == base::CPU::PPC_POWER8) { |
| 72 supported_ |= (1u << FPR_GPR_MOV); |
| 73 } |
| 74 #endif |
| 75 if (cpu.part() == base::CPU::PPC_POWER6 || |
| 76 cpu.part() == base::CPU::PPC_POWER7 || |
| 77 cpu.part() == base::CPU::PPC_POWER8) { |
| 78 supported_ |= (1u << LWSYNC); |
| 79 } |
| 80 #if V8_OS_LINUX |
| 81 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) { |
| 82 // Assume FPU support. |
| 83 supported_ |= (1u << FPU); |
| 84 } |
| 85 if (cpu.cache_line_size() != 0) { |
| 86 cache_line_size_ = cpu.cache_line_size(); |
| 87 } |
| 88 #elif V8_OS_AIX |
| 89 // Assume FP support and the default cache line size. |
| 90 supported_ |= (1u << FPU); |
| 91 #endif |
| 92 #else // Simulator |
| 93 supported_ |= (1u << FPU); |
| 94 supported_ |= (1u << LWSYNC); |
| 95 #if V8_TARGET_ARCH_PPC64 |
| 96 supported_ |= (1u << FPR_GPR_MOV); |
| 97 #endif |
| 98 #endif |
| 99 } |
| 100 |
| 101 |
| 102 void CpuFeatures::PrintTarget() { |
| 103 const char* ppc_arch = NULL; |
| 104 |
| 105 #if V8_TARGET_ARCH_PPC64 |
| 106 ppc_arch = "ppc64"; |
| 107 #else |
| 108 ppc_arch = "ppc"; |
| 109 #endif |
| 110 |
| 111 printf("target %s\n", ppc_arch); |
| 112 } |
| 113 |
| 114 |
| 115 void CpuFeatures::PrintFeatures() { |
| 116 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU)); |
| 117 } |
| 118 |
| 119 |
| 120 Register ToRegister(int num) { |
| 121 DCHECK(num >= 0 && num < kNumRegisters); |
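| // Note: codes 1, 12 and 31 map to the V8 aliases sp, ip and fp in the |
| // table below. |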
| 122 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7, |
| 123 r8, r9, r10, r11, ip, r13, r14, r15, |
| 124 r16, r17, r18, r19, r20, r21, r22, r23, |
| 125 r24, r25, r26, r27, r28, r29, r30, fp}; |
| 126 return kRegisters[num]; |
| 127 } |
| 128 |
| 129 |
| 130 const char* DoubleRegister::AllocationIndexToString(int index) { |
| 131 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
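| // d0, d13 and d14 are intentionally absent below: they are not |
| // allocatable (reserved for fixed uses such as scratch values), so |
| // allocation indices skip over them. |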
| 132 const char* const names[] = { |
| 133 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", |
| 134 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", |
| 135 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; |
| 136 return names[index]; |
| 137 } |
| 138 |
| 139 |
| 140 // ----------------------------------------------------------------------------- |
| 141 // Implementation of RelocInfo |
| 142 |
| 143 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; |
| 144 |
| 145 |
| 146 bool RelocInfo::IsCodedSpecially() { |
| 147 // The deserializer needs to know whether a pointer is specially |
| 148 // coded. Being specially coded on PPC means that it is a lis/ori |
| 149 // instruction sequence or is an out of line constant pool entry, |
| 150 // and these are always the case inside code objects. |
| 151 return true; |
| 152 } |
| 153 |
| 154 |
| 155 bool RelocInfo::IsInConstantPool() { |
| 156 #if V8_OOL_CONSTANT_POOL |
| 157 return Assembler::IsConstantPoolLoadStart(pc_); |
| 158 #else |
| 159 return false; |
| 160 #endif |
| 161 } |
| 162 |
| 163 |
| 164 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
| 165 // Patch the code at the current address with the supplied instructions. |
| 166 Instr* pc = reinterpret_cast<Instr*>(pc_); |
| 167 Instr* instr = reinterpret_cast<Instr*>(instructions); |
| 168 for (int i = 0; i < instruction_count; i++) { |
| 169 *(pc + i) = *(instr + i); |
| 170 } |
| 171 |
| 172 // Indicate that code has changed. |
| 173 CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); |
| 174 } |
| 175 |
| 176 |
| 177 // Patch the code at the current PC with a call to the target address. |
| 178 // Additional guard instructions can be added if required. |
| 179 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { |
| 180 // Patch the code at the current address with a call to the target. |
| 181 UNIMPLEMENTED(); |
| 182 } |
| 183 |
| 184 |
| 185 // ----------------------------------------------------------------------------- |
| 186 // Implementation of Operand and MemOperand |
| 187 // See assembler-ppc-inl.h for inlined constructors |
| 188 |
| 189 Operand::Operand(Handle<Object> handle) { |
| 190 AllowDeferredHandleDereference using_raw_address; |
| 191 rm_ = no_reg; |
| 192 // Verify all Objects referred to by code are NOT in new space. |
| 193 Object* obj = *handle; |
| 194 if (obj->IsHeapObject()) { |
| 195 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
| 196 imm_ = reinterpret_cast<intptr_t>(handle.location()); |
| 197 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
| 198 } else { |
| 199 // no relocation needed |
| 200 imm_ = reinterpret_cast<intptr_t>(obj); |
| 201 rmode_ = kRelocInfo_NONEPTR; |
| 202 } |
| 203 } |
| 204 |
| 205 |
| 206 MemOperand::MemOperand(Register rn, int32_t offset) { |
| 207 ra_ = rn; |
| 208 rb_ = no_reg; |
| 209 offset_ = offset; |
| 210 } |
| 211 |
| 212 |
| 213 MemOperand::MemOperand(Register ra, Register rb) { |
| 214 ra_ = ra; |
| 215 rb_ = rb; |
| 216 offset_ = 0; |
| 217 } |
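| // These two constructors correspond to the two PPC addressing modes: |
| // D-form (base register plus signed 16-bit displacement) and X-form |
| // (base register plus index register). |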
| 218 |
| 219 |
| 220 // ----------------------------------------------------------------------------- |
| 221 // Specific instructions, constants, and masks. |
| 222 |
| 223 // Spare buffer. |
| 224 static const int kMinimalBufferSize = 4 * KB; |
| 225 |
| 226 |
| 227 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 228 : AssemblerBase(isolate, buffer, buffer_size), |
| 229 recorded_ast_id_(TypeFeedbackId::None()), |
| 230 #if V8_OOL_CONSTANT_POOL |
| 231 constant_pool_builder_(), |
| 232 #endif |
| 233 positions_recorder_(this) { |
| 234 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); |
| 235 |
| 236 no_trampoline_pool_before_ = 0; |
| 237 trampoline_pool_blocked_nesting_ = 0; |
| 238 // We leave space (kMaxBlockTrampolineSectionSize) |
| 239 // for BlockTrampolinePoolScope buffer. |
| 240 next_buffer_check_ = |
| 241 FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach - |
| 242 kMaxBlockTrampolineSectionSize; |
| 243 internal_trampoline_exception_ = false; |
| 244 last_bound_pos_ = 0; |
| 245 trampoline_emitted_ = FLAG_force_long_branches; |
| 246 unbound_labels_count_ = 0; |
| 247 ClearRecordedAstId(); |
| 248 } |
| 249 |
| 250 |
| 251 void Assembler::GetCode(CodeDesc* desc) { |
| 252 // Set up code descriptor. |
| 253 desc->buffer = buffer_; |
| 254 desc->buffer_size = buffer_size_; |
| 255 desc->instr_size = pc_offset(); |
| 256 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 257 desc->origin = this; |
| 258 } |
| 259 |
| 260 |
| 261 void Assembler::Align(int m) { |
| 262 #if V8_TARGET_ARCH_PPC64 |
| 263 DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m)); |
| 264 #else |
| 265 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m)); |
| 266 #endif |
| 267 while ((pc_offset() & (m - 1)) != 0) { |
| 268 nop(); |
| 269 } |
| 270 } |
| 271 |
| 272 |
| 273 void Assembler::CodeTargetAlign() { Align(8); } |
| 274 |
| 275 |
| 276 Condition Assembler::GetCondition(Instr instr) { |
| 277 switch (instr & kCondMask) { |
| 278 case BT: |
| 279 return eq; |
| 280 case BF: |
| 281 return ne; |
| 282 default: |
| 283 UNIMPLEMENTED(); |
| 284 } |
| 285 return al; |
| 286 } |
| 287 |
| 288 |
| 289 bool Assembler::IsLis(Instr instr) { |
| 290 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0); |
| 291 } |
| 292 |
| 293 |
| 294 bool Assembler::IsLi(Instr instr) { |
| 295 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0); |
| 296 } |
| 297 |
| 298 |
| 299 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; } |
| 300 |
| 301 |
| 302 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; } |
| 303 |
| 304 |
| 305 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); } |
| 306 |
| 307 |
| 308 Register Assembler::GetRA(Instr instr) { |
| 309 Register reg; |
| 310 reg.code_ = Instruction::RAValue(instr); |
| 311 return reg; |
| 312 } |
| 313 |
| 314 |
| 315 Register Assembler::GetRB(Instr instr) { |
| 316 Register reg; |
| 317 reg.code_ = Instruction::RBValue(instr); |
| 318 return reg; |
| 319 } |
| 320 |
| 321 |
| 322 #if V8_TARGET_ARCH_PPC64 |
| 323 // This code assumes a FIXED_SEQUENCE for 64-bit loads (lis/ori) |
| 324 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3, |
| 325 Instr instr4, Instr instr5) { |
| 326 // Check the instructions are indeed a five part load (into r12) |
| 327 // 3d800000 lis r12, 0 |
| 328 // 618c0000 ori r12, r12, 0 |
| 329 // 798c07c6 rldicr r12, r12, 32, 31 |
| 330 // 658c00c3 oris r12, r12, 195 |
| 331 // 618ccd40 ori r12, r12, 52544 |
| 332 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) && |
| 333 (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) && |
| 334 ((instr5 >> 16) == 0x618c)); |
| 335 } |
| 336 #else |
| 337 // This code assumes a FIXED_SEQUENCE for 32-bit loads (lis/ori) |
| 338 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) { |
| 339 // Check the instruction is indeed a two part load (into r12) |
| 340 // 3d802553 lis r12, 9555 |
| 341 // 618c5000 ori r12, r12, 20480 |
| 342 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c)); |
| 343 } |
| 344 #endif |
| 345 |
| 346 |
| 347 bool Assembler::IsCmpRegister(Instr instr) { |
| 348 return (((instr & kOpcodeMask) == EXT2) && |
| 349 ((instr & kExt2OpcodeMask) == CMP)); |
| 350 } |
| 351 |
| 352 |
| 353 bool Assembler::IsRlwinm(Instr instr) { |
| 354 return ((instr & kOpcodeMask) == RLWINMX); |
| 355 } |
| 356 |
| 357 |
| 358 #if V8_TARGET_ARCH_PPC64 |
| 359 bool Assembler::IsRldicl(Instr instr) { |
| 360 return (((instr & kOpcodeMask) == EXT5) && |
| 361 ((instr & kExt5OpcodeMask) == RLDICL)); |
| 362 } |
| 363 #endif |
| 364 |
| 365 |
| 366 bool Assembler::IsCmpImmediate(Instr instr) { |
| 367 return ((instr & kOpcodeMask) == CMPI); |
| 368 } |
| 369 |
| 370 |
| 371 bool Assembler::IsCrSet(Instr instr) { |
| 372 return (((instr & kOpcodeMask) == EXT1) && |
| 373 ((instr & kExt1OpcodeMask) == CREQV)); |
| 374 } |
| 375 |
| 376 |
| 377 Register Assembler::GetCmpImmediateRegister(Instr instr) { |
| 378 DCHECK(IsCmpImmediate(instr)); |
| 379 return GetRA(instr); |
| 380 } |
| 381 |
| 382 |
| 383 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { |
| 384 DCHECK(IsCmpImmediate(instr)); |
| 385 return instr & kOff16Mask; |
| 386 } |
| 387 |
| 388 |
| 389 // Labels refer to positions in the (to be) generated code. |
| 390 // There are bound, linked, and unused labels. |
| 391 // |
| 392 // Bound labels refer to known positions in the already |
| 393 // generated code. pos() is the position the label refers to. |
| 394 // |
| 395 // Linked labels refer to unknown positions in the code |
| 396 // to be generated; pos() is the position of the last |
| 397 // instruction using the label. |
| 398 |
| 399 |
| 400 // The link chain is terminated by a negative code position (must be aligned) |
| 401 const int kEndOfChain = -4; |
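| // Illustration: if branches at offsets 12 and 40 both reference an |
| // unbound label, the label's pos() is 40; the branch at 40 encodes the |
| // offset back to 12; and the branch at 12 encodes 0 (a branch to self), |
| // which target_at() below reports as kEndOfChain. |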
| 402 |
| 403 |
| 404 int Assembler::target_at(int pos) { |
| 405 Instr instr = instr_at(pos); |
| 406 // Check which type of branch this is: a 16- or 26-bit offset. |
| 407 int opcode = instr & kOpcodeMask; |
| 408 if (BX == opcode) { |
| 409 int imm26 = ((instr & kImm26Mask) << 6) >> 6; |
| 410 imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present |
| 411 if (imm26 == 0) return kEndOfChain; |
| 412 return pos + imm26; |
| 413 } else if (BCX == opcode) { |
| 414 int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask)); |
| 415 imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present |
| 416 if (imm16 == 0) return kEndOfChain; |
| 417 return pos + imm16; |
| 418 } else if ((instr & ~kImm26Mask) == 0) { |
| 419 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 420 if (instr == 0) { |
| 421 return kEndOfChain; |
| 422 } else { |
| 423 int32_t imm26 = SIGN_EXT_IMM26(instr); |
| 424 return (imm26 + pos); |
| 425 } |
| 426 } |
| 427 |
| 428 PPCPORT_UNIMPLEMENTED(); |
| 429 DCHECK(false); |
| 430 return -1; |
| 431 } |
| 432 |
| 433 |
| 434 void Assembler::target_at_put(int pos, int target_pos) { |
| 435 Instr instr = instr_at(pos); |
| 436 int opcode = instr & kOpcodeMask; |
| 437 |
| 438 // Check which type of branch this is: a 16- or 26-bit offset. |
| 439 if (BX == opcode) { |
| 440 int imm26 = target_pos - pos; |
| 441 DCHECK((imm26 & (kAAMask | kLKMask)) == 0); |
| 442 instr &= ((~kImm26Mask) | kAAMask | kLKMask); |
| 443 DCHECK(is_int26(imm26)); |
| 444 instr_at_put(pos, instr | (imm26 & kImm26Mask)); |
| 445 return; |
| 446 } else if (BCX == opcode) { |
| 447 int imm16 = target_pos - pos; |
| 448 DCHECK((imm16 & (kAAMask | kLKMask)) == 0); |
| 449 instr &= ((~kImm16Mask) | kAAMask | kLKMask); |
| 450 DCHECK(is_int16(imm16)); |
| 451 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
| 452 return; |
| 453 } else if ((instr & ~kImm26Mask) == 0) { |
| 454 DCHECK(target_pos == kEndOfChain || target_pos >= 0); |
| 455 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 456 // Load the position of the label relative to the generated code object |
| 457 // pointer in a register. |
| 458 |
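| // The two-word label constant is patched into a lis/ori pair that |
| // materializes the 32-bit offset; e.g. a target of 0x00012008 becomes |
| //   lis r3, 1 |
| //   ori r3, r3, 0x2008 |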
| 459 Register dst = r3; // we assume r3 for now |
| 460 DCHECK(IsNop(instr_at(pos + kInstrSize))); |
| 461 uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag); |
| 462 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2, |
| 463 CodePatcher::DONT_FLUSH); |
| 464 int target_hi = static_cast<int>(target) >> 16; |
| 465 int target_lo = static_cast<int>(target) & 0xFFFF; |
| 466 |
| 467 patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi))); |
| 468 patcher.masm()->ori(dst, dst, Operand(target_lo)); |
| 469 return; |
| 470 } |
| 471 |
| 472 DCHECK(false); |
| 473 } |
| 474 |
| 475 |
| 476 int Assembler::max_reach_from(int pos) { |
| 477 Instr instr = instr_at(pos); |
| 478 int opcode = instr & kOpcodeMask; |
| 479 |
| 480 // Check which type of branch this is: a 16- or 26-bit offset. |
| 481 if (BX == opcode) { |
| 482 return 26; |
| 483 } else if (BCX == opcode) { |
| 484 return 16; |
| 485 } else if ((instr & ~kImm26Mask) == 0) { |
| 486 // Emitted label constant, not part of a branch (regexp PushBacktrack). |
| 487 return 26; |
| 488 } |
| 489 |
| 490 DCHECK(false); |
| 491 return 0; |
| 492 } |
| 493 |
| 494 |
| 495 void Assembler::bind_to(Label* L, int pos) { |
| 496 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position |
| 497 int32_t trampoline_pos = kInvalidSlotPos; |
| 498 if (L->is_linked() && !trampoline_emitted_) { |
| 499 unbound_labels_count_--; |
| 500 next_buffer_check_ += kTrampolineSlotsSize; |
| 501 } |
| 502 |
| 503 while (L->is_linked()) { |
| 504 int fixup_pos = L->pos(); |
| 505 int32_t offset = pos - fixup_pos; |
| 506 int maxReach = max_reach_from(fixup_pos); |
| 507 next(L); // call next before overwriting link with target at fixup_pos |
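| // If the branch at fixup_pos cannot reach pos directly, route it |
| // through a trampoline slot: the slot gets a full-reach branch to pos |
| // and the short branch is retargeted at the slot. |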
| 508 if (!is_intn(offset, maxReach)) { |
| 509 if (trampoline_pos == kInvalidSlotPos) { |
| 510 trampoline_pos = get_trampoline_entry(); |
| 511 CHECK(trampoline_pos != kInvalidSlotPos); |
| 512 target_at_put(trampoline_pos, pos); |
| 513 } |
| 514 target_at_put(fixup_pos, trampoline_pos); |
| 515 } else { |
| 516 target_at_put(fixup_pos, pos); |
| 517 } |
| 518 } |
| 519 L->bind_to(pos); |
| 520 |
| 521 // Keep track of the last bound label so we don't eliminate any instructions |
| 522 // before a bound label. |
| 523 if (pos > last_bound_pos_) last_bound_pos_ = pos; |
| 524 } |
| 525 |
| 526 |
| 527 void Assembler::bind(Label* L) { |
| 528 DCHECK(!L->is_bound()); // label can only be bound once |
| 529 bind_to(L, pc_offset()); |
| 530 } |
| 531 |
| 532 |
| 533 void Assembler::next(Label* L) { |
| 534 DCHECK(L->is_linked()); |
| 535 int link = target_at(L->pos()); |
| 536 if (link == kEndOfChain) { |
| 537 L->Unuse(); |
| 538 } else { |
| 539 DCHECK(link >= 0); |
| 540 L->link_to(link); |
| 541 } |
| 542 } |
| 543 |
| 544 |
| 545 bool Assembler::is_near(Label* L, Condition cond) { |
| 546 DCHECK(L->is_bound()); |
| 547 if (!L->is_bound()) return false; |
| 548 |
| 549 int maxReach = ((cond == al) ? 26 : 16); |
| 550 int offset = L->pos() - pc_offset(); |
| 551 |
| 552 return is_intn(offset, maxReach); |
| 553 } |
| 554 |
| 555 |
| 556 void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra, |
| 557 DoubleRegister frb, RCBit r) { |
| 558 emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r); |
| 559 } |
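| // In these emit helpers, multiplying a register code by B21, B16 or B11 |
| // (i.e. 1 << 21, etc.) shifts it into the corresponding 5-bit field of |
| // the 32-bit instruction word. |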
| 560 |
| 561 |
| 562 void Assembler::d_form(Instr instr, Register rt, Register ra, |
| 563 const intptr_t val, bool signed_disp) { |
| 564 if (signed_disp) { |
| 565 if (!is_int16(val)) { |
| 566 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val); |
| 567 } |
| 568 DCHECK(is_int16(val)); |
| 569 } else { |
| 570 if (!is_uint16(val)) { |
| 571 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR |
| 572 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n", |
| 573 val, val, is_uint16(val), kImm16Mask); |
| 574 } |
| 575 DCHECK(is_uint16(val)); |
| 576 } |
| 577 emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val)); |
| 578 } |
| 579 |
| 580 |
| 581 void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb, |
| 582 RCBit r) { |
| 583 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r); |
| 584 } |
| 585 |
| 586 |
| 587 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb, |
| 588 OEBit o, RCBit r) { |
| 589 emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r); |
| 590 } |
| 591 |
| 592 |
| 593 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift, |
| 594 int maskbit, RCBit r) { |
| 595 int sh0_4 = shift & 0x1f; |
| 596 int sh5 = (shift >> 5) & 0x1; |
| 597 int m0_4 = maskbit & 0x1f; |
| 598 int m5 = (maskbit >> 5) & 0x1; |
| 599 |
| 600 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 | |
| 601 m5 * B5 | sh5 * B1 | r); |
| 602 } |
| 603 |
| 604 |
| 605 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb, |
| 606 int maskbit, RCBit r) { |
| 607 int m0_4 = maskbit & 0x1f; |
| 608 int m5 = (maskbit >> 5) & 0x1; |
| 609 |
| 610 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 | |
| 611 m5 * B5 | r); |
| 612 } |
| 613 |
| 614 |
| 615 // Returns the next free trampoline entry. |
| 616 int32_t Assembler::get_trampoline_entry() { |
| 617 int32_t trampoline_entry = kInvalidSlotPos; |
| 618 |
| 619 if (!internal_trampoline_exception_) { |
| 620 trampoline_entry = trampoline_.take_slot(); |
| 621 |
| 622 if (kInvalidSlotPos == trampoline_entry) { |
| 623 internal_trampoline_exception_ = true; |
| 624 } |
| 625 } |
| 626 return trampoline_entry; |
| 627 } |
| 628 |
| 629 |
| 630 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
| 631 int target_pos; |
| 632 if (L->is_bound()) { |
| 633 target_pos = L->pos(); |
| 634 } else { |
| 635 if (L->is_linked()) { |
| 636 target_pos = L->pos(); // L's link |
| 637 } else { |
| 638 // was: target_pos = kEndOfChain; |
| 639 // However, using branch to self to mark the first reference |
| 640 // should avoid most instances of branch offset overflow. See |
| 641 // target_at() for where this is converted back to kEndOfChain. |
| 642 target_pos = pc_offset(); |
| 643 if (!trampoline_emitted_) { |
| 644 unbound_labels_count_++; |
| 645 next_buffer_check_ -= kTrampolineSlotsSize; |
| 646 } |
| 647 } |
| 648 L->link_to(pc_offset()); |
| 649 } |
| 650 |
| 651 return target_pos - pc_offset(); |
| 652 } |
| 653 |
| 654 |
| 655 // Branch instructions. |
| 656 |
| 657 |
| 658 void Assembler::bclr(BOfield bo, LKBit lk) { |
| 659 positions_recorder()->WriteRecordedPositions(); |
| 660 emit(EXT1 | bo | BCLRX | lk); |
| 661 } |
| 662 |
| 663 |
| 664 void Assembler::bcctr(BOfield bo, LKBit lk) { |
| 665 positions_recorder()->WriteRecordedPositions(); |
| 666 emit(EXT1 | bo | BCCTRX | lk); |
| 667 } |
| 668 |
| 669 |
| 670 // Pseudo op - branch to link register |
| 671 void Assembler::blr() { bclr(BA, LeaveLK); } |
| 672 |
| 673 |
| 674 // Pseudo op - branch to count register -- used for "jump" |
| 675 void Assembler::bctr() { bcctr(BA, LeaveLK); } |
| 676 |
| 677 |
| 678 void Assembler::bctrl() { bcctr(BA, SetLK); } |
| 679 |
| 680 |
| 681 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) { |
| 682 if (lk == SetLK) { |
| 683 positions_recorder()->WriteRecordedPositions(); |
| 684 } |
| 685 DCHECK(is_int16(branch_offset)); |
| 686 emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk); |
| 687 } |
| 688 |
| 689 |
| 690 void Assembler::b(int branch_offset, LKBit lk) { |
| 691 if (lk == SetLK) { |
| 692 positions_recorder()->WriteRecordedPositions(); |
| 693 } |
| 694 DCHECK((branch_offset & 3) == 0); |
| 695 int imm26 = branch_offset; |
| 696 DCHECK(is_int26(imm26)); |
| 697 // TODO: add AA and LK bits. |
| 698 emit(BX | (imm26 & kImm26Mask) | lk); |
| 699 } |
| 700 |
| 701 |
| 702 void Assembler::xori(Register dst, Register src, const Operand& imm) { |
| 703 d_form(XORI, src, dst, imm.imm_, false); |
| 704 } |
| 705 |
| 706 |
| 707 void Assembler::xoris(Register ra, Register rs, const Operand& imm) { |
| 708 d_form(XORIS, rs, ra, imm.imm_, false); |
| 709 } |
| 710 |
| 711 |
| 712 void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) { |
| 713 x_form(EXT2 | XORX, dst, src1, src2, rc); |
| 714 } |
| 715 |
| 716 |
| 717 void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) { |
| 718 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc); |
| 719 } |
| 720 |
| 721 |
| 722 void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) { |
| 723 x_form(EXT2 | ANDX, ra, rs, rb, rc); |
| 724 } |
| 725 |
| 726 |
| 727 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me, |
| 728 RCBit rc) { |
| 729 sh &= 0x1f; |
| 730 mb &= 0x1f; |
| 731 me &= 0x1f; |
| 732 emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 | |
| 733 me << 1 | rc); |
| 734 } |
| 735 |
| 736 |
| 737 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me, |
| 738 RCBit rc) { |
| 739 mb &= 0x1f; |
| 740 me &= 0x1f; |
| 741 emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 | |
| 742 me << 1 | rc); |
| 743 } |
| 744 |
| 745 |
| 746 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me, |
| 747 RCBit rc) { |
| 748 sh &= 0x1f; |
| 749 mb &= 0x1f; |
| 750 me &= 0x1f; |
| 751 emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 | |
| 752 me << 1 | rc); |
| 753 } |
| 754 |
| 755 |
| 756 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 757 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 758 rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc); |
| 759 } |
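| // slwi and the rotate-and-mask variants below are the standard Power |
| // ISA simplified mnemonics built on rlwinm; e.g. slwi(dst, src, |
| // Operand(8)) emits rlwinm(dst, src, 8, 0, 23). |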
| 760 |
| 761 |
| 762 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 763 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 764 rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc); |
| 765 } |
| 766 |
| 767 |
| 768 void Assembler::clrrwi(Register dst, Register src, const Operand& val, |
| 769 RCBit rc) { |
| 770 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 771 rlwinm(dst, src, 0, 0, 31 - val.imm_, rc); |
| 772 } |
| 773 |
| 774 |
| 775 void Assembler::clrlwi(Register dst, Register src, const Operand& val, |
| 776 RCBit rc) { |
| 777 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 778 rlwinm(dst, src, 0, val.imm_, 31, rc); |
| 779 } |
| 780 |
| 781 |
| 782 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) { |
| 783 emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r); |
| 784 } |
| 785 |
| 786 |
| 787 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) { |
| 788 x_form(EXT2 | SRWX, dst, src1, src2, r); |
| 789 } |
| 790 |
| 791 |
| 792 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) { |
| 793 x_form(EXT2 | SLWX, dst, src1, src2, r); |
| 794 } |
| 795 |
| 796 |
| 797 void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) { |
| 798 x_form(EXT2 | SRAW, ra, rs, rb, r); |
| 799 } |
| 800 |
| 801 |
| 802 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) { |
| 803 rlwnm(ra, rs, rb, 0, 31, r); |
| 804 } |
| 805 |
| 806 |
| 807 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) { |
| 808 rlwinm(ra, rs, sh, 0, 31, r); |
| 809 } |
| 810 |
| 811 |
| 812 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) { |
| 813 rlwinm(ra, rs, 32 - sh, 0, 31, r); |
| 814 } |
| 815 |
| 816 |
| 817 void Assembler::subi(Register dst, Register src, const Operand& imm) { |
| 818 addi(dst, src, Operand(-(imm.imm_))); |
| 819 } |
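| // Note: this assumes -imm.imm_ still fits in a signed 16-bit immediate; |
| // imm.imm_ == -32768 would trip the is_int16 DCHECK in d_form. |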
| 820 |
| 821 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o, |
| 822 RCBit r) { |
| 823 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r); |
| 824 } |
| 825 |
| 826 |
| 827 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) { |
| 828 // a special xo_form |
| 829 emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r); |
| 830 } |
| 831 |
| 832 |
| 833 void Assembler::sub(Register dst, Register src1, Register src2, OEBit o, |
| 834 RCBit r) { |
| 835 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r); |
| 836 } |
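| // subf computes RT = RB - RA, which is why sub() and subfc() pass src2 |
| // as the RA operand: the result is dst = src1 - src2. |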
| 837 |
| 838 |
| 839 void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o, |
| 840 RCBit r) { |
| 841 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r); |
| 842 } |
| 843 |
| 844 |
| 845 void Assembler::subfic(Register dst, Register src, const Operand& imm) { |
| 846 d_form(SUBFIC, dst, src, imm.imm_, true); |
| 847 } |
| 848 |
| 849 |
| 850 void Assembler::add(Register dst, Register src1, Register src2, OEBit o, |
| 851 RCBit r) { |
| 852 xo_form(EXT2 | ADDX, dst, src1, src2, o, r); |
| 853 } |
| 854 |
| 855 |
| 856 // Multiply low word |
| 857 void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o, |
| 858 RCBit r) { |
| 859 xo_form(EXT2 | MULLW, dst, src1, src2, o, r); |
| 860 } |
| 861 |
| 862 |
| 863 // Multiply hi word |
| 864 void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o, |
| 865 RCBit r) { |
| 866 xo_form(EXT2 | MULHWX, dst, src1, src2, o, r); |
| 867 } |
| 868 |
| 869 |
| 870 // Divide word |
| 871 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o, |
| 872 RCBit r) { |
| 873 xo_form(EXT2 | DIVW, dst, src1, src2, o, r); |
| 874 } |
| 875 |
| 876 |
| 877 void Assembler::addi(Register dst, Register src, const Operand& imm) { |
| 878 DCHECK(!src.is(r0)); // use li instead to show intent |
| 879 d_form(ADDI, dst, src, imm.imm_, true); |
| 880 } |
| 881 |
| 882 |
| 883 void Assembler::addis(Register dst, Register src, const Operand& imm) { |
| 884 DCHECK(!src.is(r0)); // use lis instead to show intent |
| 885 d_form(ADDIS, dst, src, imm.imm_, true); |
| 886 } |
| 887 |
| 888 |
| 889 void Assembler::addic(Register dst, Register src, const Operand& imm) { |
| 890 d_form(ADDIC, dst, src, imm.imm_, true); |
| 891 } |
| 892 |
| 893 |
| 894 void Assembler::andi(Register ra, Register rs, const Operand& imm) { |
| 895 d_form(ANDIx, rs, ra, imm.imm_, false); |
| 896 } |
| 897 |
| 898 |
| 899 void Assembler::andis(Register ra, Register rs, const Operand& imm) { |
| 900 d_form(ANDISx, rs, ra, imm.imm_, false); |
| 901 } |
| 902 |
| 903 |
| 904 void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) { |
| 905 x_form(EXT2 | NORX, dst, src1, src2, r); |
| 906 } |
| 907 |
| 908 |
| 909 void Assembler::notx(Register dst, Register src, RCBit r) { |
| 910 x_form(EXT2 | NORX, dst, src, src, r); |
| 911 } |
| 912 |
| 913 |
| 914 void Assembler::ori(Register ra, Register rs, const Operand& imm) { |
| 915 d_form(ORI, rs, ra, imm.imm_, false); |
| 916 } |
| 917 |
| 918 |
| 919 void Assembler::oris(Register dst, Register src, const Operand& imm) { |
| 920 d_form(ORIS, src, dst, imm.imm_, false); |
| 921 } |
| 922 |
| 923 |
| 924 void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) { |
| 925 x_form(EXT2 | ORX, dst, src1, src2, rc); |
| 926 } |
| 927 |
| 928 |
| 929 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { |
| 930 intptr_t imm16 = src2.imm_; |
| 931 #if V8_TARGET_ARCH_PPC64 |
| 932 int L = 1; |
| 933 #else |
| 934 int L = 0; |
| 935 #endif |
| 936 DCHECK(is_int16(imm16)); |
| 937 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 938 imm16 &= kImm16Mask; |
| 939 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16); |
| 940 } |
| 941 |
| 942 |
| 943 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { |
| 944 uintptr_t uimm16 = src2.imm_; |
| 945 #if V8_TARGET_ARCH_PPC64 |
| 946 int L = 1; |
| 947 #else |
| 948 int L = 0; |
| 949 #endif |
| 950 DCHECK(is_uint16(uimm16)); |
| 951 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 952 uimm16 &= kImm16Mask; |
| 953 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16); |
| 954 } |
| 955 |
| 956 |
| 957 void Assembler::cmp(Register src1, Register src2, CRegister cr) { |
| 958 #if V8_TARGET_ARCH_PPC64 |
| 959 int L = 1; |
| 960 #else |
| 961 int L = 0; |
| 962 #endif |
| 963 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 964 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 965 src2.code() * B11); |
| 966 } |
| 967 |
| 968 |
| 969 void Assembler::cmpl(Register src1, Register src2, CRegister cr) { |
| 970 #if V8_TARGET_ARCH_PPC64 |
| 971 int L = 1; |
| 972 #else |
| 973 int L = 0; |
| 974 #endif |
| 975 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 976 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 977 src2.code() * B11); |
| 978 } |
| 979 |
| 980 |
| 981 void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) { |
| 982 intptr_t imm16 = src2.imm_; |
| 983 int L = 0; |
| 984 DCHECK(is_int16(imm16)); |
| 985 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 986 imm16 &= kImm16Mask; |
| 987 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16); |
| 988 } |
| 989 |
| 990 |
| 991 void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) { |
| 992 uintptr_t uimm16 = src2.imm_; |
| 993 int L = 0; |
| 994 DCHECK(is_uint16(uimm16)); |
| 995 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 996 uimm16 &= kImm16Mask; |
| 997 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16); |
| 998 } |
| 999 |
| 1000 |
| 1001 void Assembler::cmpw(Register src1, Register src2, CRegister cr) { |
| 1002 int L = 0; |
| 1003 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1004 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 1005 src2.code() * B11); |
| 1006 } |
| 1007 |
| 1008 |
| 1009 void Assembler::cmplw(Register src1, Register src2, CRegister cr) { |
| 1010 int L = 0; |
| 1011 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1012 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 1013 src2.code() * B11); |
| 1014 } |
| 1015 |
| 1016 |
| 1017 // Pseudo op - load immediate |
| 1018 void Assembler::li(Register dst, const Operand& imm) { |
| 1019 d_form(ADDI, dst, r0, imm.imm_, true); |
| 1020 } |
| 1021 |
| 1022 |
| 1023 void Assembler::lis(Register dst, const Operand& imm) { |
| 1024 d_form(ADDIS, dst, r0, imm.imm_, true); |
| 1025 } |
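| // Both li and lis encode RA as r0, which addi/addis treat as the |
| // constant zero rather than the contents of r0 -- this is what makes |
| // them plain immediate loads (and why addi/addis DCHECK against r0). |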
| 1026 |
| 1027 |
| 1028 // Pseudo op - move register |
| 1029 void Assembler::mr(Register dst, Register src) { |
| 1030 // actually or(dst, src, src) |
| 1031 orx(dst, src, src); |
| 1032 } |
| 1033 |
| 1034 |
| 1035 void Assembler::lbz(Register dst, const MemOperand& src) { |
| 1036 DCHECK(!src.ra_.is(r0)); |
| 1037 d_form(LBZ, dst, src.ra(), src.offset(), true); |
| 1038 } |
| 1039 |
| 1040 |
| 1041 void Assembler::lbzx(Register rt, const MemOperand& src) { |
| 1042 Register ra = src.ra(); |
| 1043 Register rb = src.rb(); |
| 1044 DCHECK(!ra.is(r0)); |
| 1045 emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1046 LeaveRC); |
| 1047 } |
| 1048 |
| 1049 |
| 1050 void Assembler::lbzux(Register rt, const MemOperand& src) { |
| 1051 Register ra = src.ra(); |
| 1052 Register rb = src.rb(); |
| 1053 DCHECK(!ra.is(r0)); |
| 1054 emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1055 LeaveRC); |
| 1056 } |
| 1057 |
| 1058 |
| 1059 void Assembler::lhz(Register dst, const MemOperand& src) { |
| 1060 DCHECK(!src.ra_.is(r0)); |
| 1061 d_form(LHZ, dst, src.ra(), src.offset(), true); |
| 1062 } |
| 1063 |
| 1064 |
| 1065 void Assembler::lhzx(Register rt, const MemOperand& src) { |
| 1066 Register ra = src.ra(); |
| 1067 Register rb = src.rb(); |
| 1068 DCHECK(!ra.is(r0)); |
| 1069 emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1070 LeaveRC); |
| 1071 } |
| 1072 |
| 1073 |
| 1074 void Assembler::lhzux(Register rt, const MemOperand& src) { |
| 1075 Register ra = src.ra(); |
| 1076 Register rb = src.rb(); |
| 1077 DCHECK(!ra.is(r0)); |
| 1078 emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1079 LeaveRC); |
| 1080 } |
| 1081 |
| 1082 |
| 1083 void Assembler::lwz(Register dst, const MemOperand& src) { |
| 1084 DCHECK(!src.ra_.is(r0)); |
| 1085 d_form(LWZ, dst, src.ra(), src.offset(), true); |
| 1086 } |
| 1087 |
| 1088 |
| 1089 void Assembler::lwzu(Register dst, const MemOperand& src) { |
| 1090 DCHECK(!src.ra_.is(r0)); |
| 1091 d_form(LWZU, dst, src.ra(), src.offset(), true); |
| 1092 } |
| 1093 |
| 1094 |
| 1095 void Assembler::lwzx(Register rt, const MemOperand& src) { |
| 1096 Register ra = src.ra(); |
| 1097 Register rb = src.rb(); |
| 1098 DCHECK(!ra.is(r0)); |
| 1099 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1100 LeaveRC); |
| 1101 } |
| 1102 |
| 1103 |
| 1104 void Assembler::lwzux(Register rt, const MemOperand& src) { |
| 1105 Register ra = src.ra(); |
| 1106 Register rb = src.rb(); |
| 1107 DCHECK(!ra.is(r0)); |
| 1108 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1109 LeaveRC); |
| 1110 } |
| 1111 |
| 1112 |
| 1113 void Assembler::lwa(Register dst, const MemOperand& src) { |
| 1114 #if V8_TARGET_ARCH_PPC64 |
| 1115 int offset = src.offset(); |
| 1116 DCHECK(!src.ra_.is(r0)); |
| 1117 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1118 offset = kImm16Mask & offset; |
| 1119 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2); |
| 1120 #else |
| 1121 lwz(dst, src); |
| 1122 #endif |
| 1123 } |
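| // On PPC64, lwa is DS-form and shares the LD primary opcode; the |
| // low-order extended-opcode bits select ld (0), ldu (1) or lwa (2), |
| // hence the 2 OR'ed into the encoding above. DS-form displacements |
| // must be 4-byte aligned, as the DCHECK enforces. |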
| 1124 |
| 1125 |
| 1126 void Assembler::stb(Register dst, const MemOperand& src) { |
| 1127 DCHECK(!src.ra_.is(r0)); |
| 1128 d_form(STB, dst, src.ra(), src.offset(), true); |
| 1129 } |
| 1130 |
| 1131 |
| 1132 void Assembler::stbx(Register rs, const MemOperand& src) { |
| 1133 Register ra = src.ra(); |
| 1134 Register rb = src.rb(); |
| 1135 DCHECK(!ra.is(r0)); |
| 1136 emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1137 LeaveRC); |
| 1138 } |
| 1139 |
| 1140 |
| 1141 void Assembler::stbux(Register rs, const MemOperand& src) { |
| 1142 Register ra = src.ra(); |
| 1143 Register rb = src.rb(); |
| 1144 DCHECK(!ra.is(r0)); |
| 1145 emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1146 LeaveRC); |
| 1147 } |
| 1148 |
| 1149 |
| 1150 void Assembler::sth(Register dst, const MemOperand& src) { |
| 1151 DCHECK(!src.ra_.is(r0)); |
| 1152 d_form(STH, dst, src.ra(), src.offset(), true); |
| 1153 } |
| 1154 |
| 1155 |
| 1156 void Assembler::sthx(Register rs, const MemOperand& src) { |
| 1157 Register ra = src.ra(); |
| 1158 Register rb = src.rb(); |
| 1159 DCHECK(!ra.is(r0)); |
| 1160 emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1161 LeaveRC); |
| 1162 } |
| 1163 |
| 1164 |
| 1165 void Assembler::sthux(Register rs, const MemOperand& src) { |
| 1166 Register ra = src.ra(); |
| 1167 Register rb = src.rb(); |
| 1168 DCHECK(!ra.is(r0)); |
| 1169 emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1170 LeaveRC); |
| 1171 } |
| 1172 |
| 1173 |
| 1174 void Assembler::stw(Register dst, const MemOperand& src) { |
| 1175 DCHECK(!src.ra_.is(r0)); |
| 1176 d_form(STW, dst, src.ra(), src.offset(), true); |
| 1177 } |
| 1178 |
| 1179 |
| 1180 void Assembler::stwu(Register dst, const MemOperand& src) { |
| 1181 DCHECK(!src.ra_.is(r0)); |
| 1182 d_form(STWU, dst, src.ra(), src.offset(), true); |
| 1183 } |
| 1184 |
| 1185 |
| 1186 void Assembler::stwx(Register rs, const MemOperand& src) { |
| 1187 Register ra = src.ra(); |
| 1188 Register rb = src.rb(); |
| 1189 DCHECK(!ra.is(r0)); |
| 1190 emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1191 LeaveRC); |
| 1192 } |
| 1193 |
| 1194 |
| 1195 void Assembler::stwux(Register rs, const MemOperand& src) { |
| 1196 Register ra = src.ra(); |
| 1197 Register rb = src.rb(); |
| 1198 DCHECK(!ra.is(r0)); |
| 1199 emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1200 LeaveRC); |
| 1201 } |
| 1202 |
| 1203 |
| 1204 void Assembler::extsb(Register rs, Register ra, RCBit rc) { |
| 1205 emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc); |
| 1206 } |
| 1207 |
| 1208 |
| 1209 void Assembler::extsh(Register rs, Register ra, RCBit rc) { |
| 1210 emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc); |
| 1211 } |
| 1212 |
| 1213 |
| 1214 void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) { |
| 1215 emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r); |
| 1216 } |
| 1217 |
| 1218 |
| 1219 void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) { |
| 1220 x_form(EXT2 | ANDCX, dst, src1, src2, rc); |
| 1221 } |
| 1222 |
| 1223 |
| 1224 #if V8_TARGET_ARCH_PPC64 |
| 1225 // 64-bit specific instructions |
| 1226 void Assembler::ld(Register rd, const MemOperand& src) { |
| 1227 int offset = src.offset(); |
| 1228 DCHECK(!src.ra_.is(r0)); |
| 1229 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1230 offset = kImm16Mask & offset; |
| 1231 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset); |
| 1232 } |
| 1233 |
| 1234 |
| 1235 void Assembler::ldx(Register rd, const MemOperand& src) { |
| 1236 Register ra = src.ra(); |
| 1237 Register rb = src.rb(); |
| 1238 DCHECK(!ra.is(r0)); |
| 1239 emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1240 } |
| 1241 |
| 1242 |
| 1243 void Assembler::ldu(Register rd, const MemOperand& src) { |
| 1244 int offset = src.offset(); |
| 1245 DCHECK(!src.ra_.is(r0)); |
| 1246 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1247 offset = kImm16Mask & offset; |
| 1248 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1); |
| 1249 } |
| 1250 |
| 1251 |
| 1252 void Assembler::ldux(Register rd, const MemOperand& src) { |
| 1253 Register ra = src.ra(); |
| 1254 Register rb = src.rb(); |
| 1255 DCHECK(!ra.is(r0)); |
| 1256 emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1257 } |
| 1258 |
| 1259 |
| 1260 void Assembler::std(Register rs, const MemOperand& src) { |
| 1261 int offset = src.offset(); |
| 1262 DCHECK(!src.ra_.is(r0)); |
| 1263 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1264 offset = kImm16Mask & offset; |
| 1265 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset); |
| 1266 } |
| 1267 |
| 1268 |
| 1269 void Assembler::stdx(Register rs, const MemOperand& src) { |
| 1270 Register ra = src.ra(); |
| 1271 Register rb = src.rb(); |
| 1272 DCHECK(!ra.is(r0)); |
| 1273 emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1274 } |
| 1275 |
| 1276 |
| 1277 void Assembler::stdu(Register rs, const MemOperand& src) { |
| 1278 int offset = src.offset(); |
| 1279 DCHECK(!src.ra_.is(r0)); |
| 1280 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1281 offset = kImm16Mask & offset; |
| 1282 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1); |
| 1283 } |
| 1284 |
| 1285 |
| 1286 void Assembler::stdux(Register rs, const MemOperand& src) { |
| 1287 Register ra = src.ra(); |
| 1288 Register rb = src.rb(); |
| 1289 DCHECK(!ra.is(r0)); |
| 1290 emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1291 } |
| 1292 |
| 1293 |
| 1294 void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1295 md_form(EXT5 | RLDIC, ra, rs, sh, mb, r); |
| 1296 } |
| 1297 |
| 1298 |
| 1299 void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1300 md_form(EXT5 | RLDICL, ra, rs, sh, mb, r); |
| 1301 } |
| 1302 |
| 1303 |
| 1304 void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) { |
| 1305 mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r); |
| 1306 } |
| 1307 |
| 1308 |
| 1309 void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) { |
| 1310 md_form(EXT5 | RLDICR, ra, rs, sh, me, r); |
| 1311 } |
| 1312 |
| 1313 |
| 1314 void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 1315 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1316 rldicr(dst, src, val.imm_, 63 - val.imm_, rc); |
| 1317 } |
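| // These doubleword shift/clear mnemonics mirror the 32-bit forms above, |
| // built on rldicr/rldicl; e.g. sldi(dst, src, Operand(8)) emits |
| // rldicr(dst, src, 8, 55). |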
| 1318 |
| 1319 |
| 1320 void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 1321 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1322 rldicl(dst, src, 64 - val.imm_, val.imm_, rc); |
| 1323 } |
| 1324 |
| 1325 |
| 1326 void Assembler::clrrdi(Register dst, Register src, const Operand& val, |
| 1327 RCBit rc) { |
| 1328 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1329 rldicr(dst, src, 0, 63 - val.imm_, rc); |
| 1330 } |
| 1331 |
| 1332 |
| 1333 void Assembler::clrldi(Register dst, Register src, const Operand& val, |
| 1334 RCBit rc) { |
| 1335 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1336 rldicl(dst, src, 0, val.imm_, rc); |
| 1337 } |
| 1338 |
| 1339 |
| 1340 void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1341 md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r); |
| 1342 } |
| 1343 |
| 1344 |
| 1345 void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) { |
| 1346 int sh0_4 = sh & 0x1f; |
| 1347 int sh5 = (sh >> 5) & 0x1; |
| 1348 |
| 1349 emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | |
| 1350 sh5 * B1 | r); |
| 1351 } |
| 1352 |
| 1353 |
| 1354 void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) { |
| 1355 x_form(EXT2 | SRDX, dst, src1, src2, r); |
| 1356 } |
| 1357 |
| 1358 |
| 1359 void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) { |
| 1360 x_form(EXT2 | SLDX, dst, src1, src2, r); |
| 1361 } |
| 1362 |
| 1363 |
| 1364 void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) { |
| 1365 x_form(EXT2 | SRAD, ra, rs, rb, r); |
| 1366 } |
| 1367 |
| 1368 |
| 1369 void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) { |
| 1370 rldcl(ra, rs, rb, 0, r); |
| 1371 } |
| 1372 |
| 1373 |
| 1374 void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) { |
| 1375 rldicl(ra, rs, sh, 0, r); |
| 1376 } |
| 1377 |
| 1378 |
| 1379 void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) { |
| 1380 rldicl(ra, rs, 64 - sh, 0, r); |
| 1381 } |
| 1382 |
| 1383 |
| 1384 void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) { |
| 1385 x_form(EXT2 | CNTLZDX, ra, rs, r0, rc); |
| 1386 } |
| 1387 |
| 1388 |
| 1389 void Assembler::extsw(Register rs, Register ra, RCBit rc) { |
| 1390 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc); |
| 1391 } |
| 1392 |
| 1393 |
| 1394 void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o, |
| 1395 RCBit r) { |
| 1396 xo_form(EXT2 | MULLD, dst, src1, src2, o, r); |
| 1397 } |
| 1398 |
| 1399 |
| 1400 void Assembler::divd(Register dst, Register src1, Register src2, OEBit o, |
| 1401 RCBit r) { |
| 1402 xo_form(EXT2 | DIVD, dst, src1, src2, o, r); |
| 1403 } |
| 1404 #endif |
| 1405 |
| 1406 |
| 1407 void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) { |
| 1408 DCHECK(fopcode < fLastFaker); |
| 1409 emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode); |
| 1410 } |
| 1411 |
| 1412 |
| 1413 void Assembler::marker_asm(int mcode) { |
| 1414 if (::v8::internal::FLAG_trace_sim_stubs) { |
| 1415 DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER); |
| 1416 emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode); |
| 1417 } |
| 1418 } |
| 1419 |
| 1420 |
| 1421 // Function descriptor for AIX. |
| 1422 // Code address skips the function descriptor "header". |
| 1423 // TOC and static chain are ignored and set to 0. |
| 1424 void Assembler::function_descriptor() { |
| 1425 DCHECK(pc_offset() == 0); |
| 1426 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); |
| 1427 emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize); |
| 1428 emit_ptr(0); |
| 1429 emit_ptr(0); |
| 1430 } |
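| // The descriptor is three pointers: the code entry address (just past |
| // this 3-word header), the TOC pointer and the environment pointer (the |
| // latter two left as 0 here). |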
| 1431 |
| 1432 |
| 1433 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 1434 void Assembler::RelocateInternalReference(Address pc, intptr_t delta, |
| 1435 Address code_start, |
| 1436 ICacheFlushMode icache_flush_mode) { |
| 1437 DCHECK(delta || code_start); |
| 1438 #if ABI_USES_FUNCTION_DESCRIPTORS |
| 1439 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc); |
| 1440 if (fd[1] == 0 && fd[2] == 0) { |
| 1441 // Function descriptor |
| 1442 if (delta) { |
| 1443 fd[0] += delta; |
| 1444 } else { |
| 1445 fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize; |
| 1446 } |
| 1447 return; |
| 1448 } |
| 1449 #endif |
| 1450 #if V8_OOL_CONSTANT_POOL |
| 1451 // mov for LoadConstantPoolPointerRegister |
| 1452 ConstantPoolArray* constant_pool = NULL; |
| 1453 if (delta) { |
| 1454 code_start = target_address_at(pc, constant_pool) + delta; |
| 1455 } |
| 1456 set_target_address_at(pc, constant_pool, code_start, icache_flush_mode); |
| 1457 #endif |
| 1458 } |
| 1459 |
| 1460 |
| 1461 int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) { |
| 1462 #if ABI_USES_FUNCTION_DESCRIPTORS |
| 1463 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc); |
| 1464 if (fd[1] == 0 && fd[2] == 0) { |
| 1465 // Function descriptor |
| 1466 SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR |
| 1467 "]" |
| 1468 " function descriptor", |
| 1469 fd[0], fd[1], fd[2]); |
| 1470 return kPointerSize * 3; |
| 1471 } |
| 1472 #endif |
| 1473 return 0; |
| 1474 } |
| 1475 #endif |
| 1476 |
| 1477 |
| 1478 int Assembler::instructions_required_for_mov(const Operand& x) const { |
| 1479 #if V8_OOL_CONSTANT_POOL || DEBUG |
| 1480 bool canOptimize = |
| 1481 !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked()); |
| 1482 #endif |
| 1483 #if V8_OOL_CONSTANT_POOL |
| 1484 if (use_constant_pool_for_mov(x, canOptimize)) { |
| 1485 // Current usage guarantees that all constant pool references can |
| 1486 // use the same sequence. |
| 1487 return kMovInstructionsConstantPool; |
| 1488 } |
| 1489 #endif |
| 1490 DCHECK(!canOptimize); |
| 1491 return kMovInstructionsNoConstantPool; |
| 1492 } |
| 1493 |
| 1494 |
| 1495 #if V8_OOL_CONSTANT_POOL |
| 1496 bool Assembler::use_constant_pool_for_mov(const Operand& x, |
| 1497 bool canOptimize) const { |
| 1498 if (!is_ool_constant_pool_available() || is_constant_pool_full()) { |
| 1499 // If there is no constant pool available, we must use a mov |
| 1500 // immediate sequence. |
| 1501 return false; |
| 1502 } |
| 1503 |
| 1504 intptr_t value = x.immediate(); |
| 1505 if (canOptimize && is_int16(value)) { |
| 1506 // Prefer a single-instruction load-immediate. |
| 1507 return false; |
| 1508 } |
| 1509 |
| 1510 return true; |
| 1511 } |
| 1512 |
| 1513 |
| 1514 void Assembler::EnsureSpaceFor(int space_needed) { |
| 1515 if (buffer_space() <= (kGap + space_needed)) { |
| 1516 GrowBuffer(); |
| 1517 } |
| 1518 } |
| 1519 #endif |
| 1520 |
| 1521 |
| 1522 bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
| 1523 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { |
| 1524 if (assembler != NULL && assembler->predictable_code_size()) return true; |
| 1525 return assembler->serializer_enabled(); |
| 1526 } else if (RelocInfo::IsNone(rmode_)) { |
| 1527 return false; |
| 1528 } |
| 1529 return true; |
| 1530 } |
| 1531 |
| 1532 |
| 1533 // Primarily used for loading constants. |
| 1534 // This should really move to macro-assembler, as it |
| 1535 // is really a pseudo instruction. |
| 1536 // Some usages of this intend for a FIXED_SEQUENCE to be used. |
| 1537 // TODO: break this dependency so we can optimize mov() in general |
| 1538 // and only use the generic version when we require a fixed sequence. |
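| // For example, on PPC64 the optimized path builds 0x123456789ABCDEF0 as |
| //   lis  dst, 0x1234 |
| //   ori  dst, dst, 0x5678 |
| //   sldi dst, dst, 32 |
| //   oris dst, dst, 0x9ABC |
| //   ori  dst, dst, 0xDEF0 |
| // with the ori/oris steps elided when their 16-bit chunk is zero. |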
| 1539 void Assembler::mov(Register dst, const Operand& src) { |
| 1540 intptr_t value = src.immediate(); |
| 1541 bool canOptimize; |
| 1542 RelocInfo rinfo(pc_, src.rmode_, value, NULL); |
| 1543 |
| 1544 if (src.must_output_reloc_info(this)) { |
| 1545 RecordRelocInfo(rinfo); |
| 1546 } |
| 1547 |
| 1548 canOptimize = |
| 1549 !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked()); |
| 1550 |
| 1551 #if V8_OOL_CONSTANT_POOL |
| 1552 if (use_constant_pool_for_mov(src, canOptimize)) { |
| 1553 DCHECK(is_ool_constant_pool_available()); |
| 1554 ConstantPoolAddEntry(rinfo); |
| 1555 #if V8_TARGET_ARCH_PPC64 |
| 1556 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1557 // We are forced to use 2 instruction sequence since the constant |
| 1558 // pool pointer is tagged. |
| 1559 li(dst, Operand::Zero()); |
| 1560 ldx(dst, MemOperand(kConstantPoolRegister, dst)); |
| 1561 #else |
| 1562 lwz(dst, MemOperand(kConstantPoolRegister, 0)); |
| 1563 #endif |
| 1564 return; |
| 1565 } |
| 1566 #endif |
| 1567 |
| 1568 if (canOptimize) { |
| 1569 if (is_int16(value)) { |
| 1570 li(dst, Operand(value)); |
| 1571 } else { |
| 1572 uint16_t u16; |
| 1573 #if V8_TARGET_ARCH_PPC64 |
| 1574 if (is_int32(value)) { |
| 1575 #endif |
| 1576 lis(dst, Operand(value >> 16)); |
| 1577 #if V8_TARGET_ARCH_PPC64 |
| 1578 } else { |
| 1579 if (is_int48(value)) { |
| 1580 li(dst, Operand(value >> 32)); |
| 1581 } else { |
| 1582 lis(dst, Operand(value >> 48)); |
| 1583 u16 = ((value >> 32) & 0xffff); |
| 1584 if (u16) { |
| 1585 ori(dst, dst, Operand(u16)); |
| 1586 } |
| 1587 } |
| 1588 sldi(dst, dst, Operand(32)); |
| 1589 u16 = ((value >> 16) & 0xffff); |
| 1590 if (u16) { |
| 1591 oris(dst, dst, Operand(u16)); |
| 1592 } |
| 1593 } |
| 1594 #endif |
| 1595 u16 = (value & 0xffff); |
| 1596 if (u16) { |
| 1597 ori(dst, dst, Operand(u16)); |
| 1598 } |
| 1599 } |
| 1600 return; |
| 1601 } |
| 1602 |
| 1603 DCHECK(!canOptimize); |
| 1604 |
| 1605 { |
| 1606 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1607 #if V8_TARGET_ARCH_PPC64 |
| 1608 int32_t hi_32 = static_cast<int32_t>(value >> 32); |
| 1609 int32_t lo_32 = static_cast<int32_t>(value); |
| 1610 int hi_word = static_cast<int>(hi_32 >> 16); |
| 1611 int lo_word = static_cast<int>(hi_32 & 0xffff); |
| 1612 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1613 ori(dst, dst, Operand(lo_word)); |
| 1614 sldi(dst, dst, Operand(32)); |
| 1615 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff)); |
| 1616 lo_word = static_cast<int>(lo_32 & 0xffff); |
| 1617 oris(dst, dst, Operand(hi_word)); |
| 1618 ori(dst, dst, Operand(lo_word)); |
| 1619 #else |
| 1620 int hi_word = static_cast<int>(value >> 16); |
| 1621 int lo_word = static_cast<int>(value & 0xffff); |
| 1622 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1623 ori(dst, dst, Operand(lo_word)); |
| 1624 #endif |
| 1625 } |
| 1626 } |
| 1627 |
| 1628 |
| 1629 void Assembler::mov_label_offset(Register dst, Label* label) { |
| 1630 if (label->is_bound()) { |
| 1631 int target = label->pos(); |
| 1632 mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag)); |
| 1633 } else { |
| 1634 bool is_linked = label->is_linked(); |
| 1635 // Emit the link to the label in the code stream followed by extra |
| 1636 // nop instructions. |
| 1637 DCHECK(dst.is(r3)); // target_at_put assumes r3 for now |
| 1638 int link = is_linked ? label->pos() - pc_offset() : 0; |
| 1639 label->link_to(pc_offset()); |
| 1640 |
| 1641 if (!is_linked && !trampoline_emitted_) { |
| 1642 unbound_labels_count_++; |
| 1643 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1644 } |
| 1645 |
| 1646 // When the label is bound, these instructions will be patched |
| 1647 // with a 2 instruction mov sequence that will load the |
| 1648 // destination register with the position of the label from the |
| 1649 // beginning of the code. |
| 1650 // |
| 1651 // When the label gets bound: target_at extracts the link and |
| 1652 // target_at_put patches the instructions. |
| 1653 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1654 emit(link); |
| 1655 nop(); |
| 1656 } |
| 1657 } |
| 1658 |
| 1659 |
| 1660 // Special register instructions |
| 1661 void Assembler::crxor(int bt, int ba, int bb) { |
| 1662 emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11); |
| 1663 } |
| 1664 |
| 1665 |
| 1666 void Assembler::creqv(int bt, int ba, int bb) { |
| 1667 emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11); |
| 1668 } |
| 1669 |
| 1670 |
| 1671 void Assembler::mflr(Register dst) { |
| 1672 emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit |
| 1673 } |
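| // The SPR field encodes the 10-bit SPR number with its two 5-bit halves |
| // swapped, so LR (SPR 8) appears as 256, CTR (SPR 9) as 288 and XER |
| // (SPR 1) as 32 in these encodings. |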
| 1674 |
| 1675 |
| 1676 void Assembler::mtlr(Register src) { |
| 1677 emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit |
| 1678 } |
| 1679 |
| 1680 |
| 1681 void Assembler::mtctr(Register src) { |
| 1682 emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit |
| 1683 } |
| 1684 |
| 1685 |
| 1686 void Assembler::mtxer(Register src) { |
| 1687 emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11); |
| 1688 } |
| 1689 |
| 1690 |
| 1691 void Assembler::mcrfs(int bf, int bfa) { |
| 1692 emit(EXT4 | MCRFS | bf * B23 | bfa * B18); |
| 1693 } |
| 1694 |
| 1695 |
| 1696 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); } |
| 1697 |
| 1698 |
| 1699 #if V8_TARGET_ARCH_PPC64 |
| 1700 void Assembler::mffprd(Register dst, DoubleRegister src) { |
| 1701 emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16); |
| 1702 } |
| 1703 |
| 1704 |
| 1705 void Assembler::mffprwz(Register dst, DoubleRegister src) { |
| 1706 emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16); |
| 1707 } |
| 1708 |
| 1709 |
| 1710 void Assembler::mtfprd(DoubleRegister dst, Register src) { |
| 1711 emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16); |
| 1712 } |
| 1713 |
| 1714 |
| 1715 void Assembler::mtfprwz(DoubleRegister dst, Register src) { |
| 1716 emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16); |
| 1717 } |
| 1718 |
| 1719 |
| 1720 void Assembler::mtfprwa(DoubleRegister dst, Register src) { |
| 1721 emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16); |
| 1722 } |
| 1723 #endif |
| 1724 |
| 1725 |
| 1726 // Exception-generating instructions and debugging support. |
| 1727 // Stops with a non-negative code less than kNumOfWatchedStops support |
| 1728 // enabling/disabling and a counter feature; see simulator-ppc.h. |
| 1729 void Assembler::stop(const char* msg, Condition cond, int32_t code, |
| 1730 CRegister cr) { |
| 1731 if (cond != al) { |
| 1732 Label skip; |
| 1733 b(NegateCondition(cond), &skip, cr); |
| 1734 bkpt(0); |
| 1735 bind(&skip); |
| 1736 } else { |
| 1737 bkpt(0); |
| 1738 } |
| 1739 } |
| 1740 |
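| // For example, stop("check", ne) emits (sketch): |
| //   beq skip       ; b(NegateCondition(ne), &skip) |
| //   <breakpoint>   ; bkpt(0) |
| // skip: |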
| 1741 |
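| // The constant 0x7d821008 decodes as "twge r2, r2"; since r2 >= r2 always |
| // holds, it traps unconditionally. The imm16 argument is currently unused. |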
| 1742 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); } |
| 1743 |
| 1744 |
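| // info() embeds a marker word followed by the raw msg pointer in the |
| // instruction stream. It is only emitted under FLAG_trace_sim_stubs and |
| // is intended for the simulator, not for execution on real hardware. |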
| 1745 void Assembler::info(const char* msg, Condition cond, int32_t code, |
| 1746 CRegister cr) { |
| 1747 if (::v8::internal::FLAG_trace_sim_stubs) { |
| 1748 emit(0x7d9ff808); |
| 1749 #if V8_TARGET_ARCH_PPC64 |
| 1750 uint64_t value = reinterpret_cast<uint64_t>(msg); |
| 1751 emit(static_cast<uint32_t>(value >> 32)); |
| 1752 emit(static_cast<uint32_t>(value & 0xFFFFFFFF)); |
| 1753 #else |
| 1754 emit(reinterpret_cast<Instr>(msg)); |
| 1755 #endif |
| 1756 } |
| 1757 } |
| 1758 |
| 1759 |
| 1760 void Assembler::dcbf(Register ra, Register rb) { |
| 1761 emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11); |
| 1762 } |
| 1763 |
| 1764 |
| 1765 void Assembler::sync() { emit(EXT2 | SYNC); } |
| 1766 |
| 1767 |
| 1768 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); } |
| 1769 |
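| // lwsync is sync with L = 1 (the 1 * B21 above): a lighter-weight barrier |
| // that orders all access pairs except store-load. |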
| 1770 |
| 1771 void Assembler::icbi(Register ra, Register rb) { |
| 1772 emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11); |
| 1773 } |
| 1774 |
| 1775 |
| 1776 void Assembler::isync() { emit(EXT1 | ISYNC); } |
| 1777 |
| 1778 |
| 1779 // Floating point support |
| 1780 |
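| // Mnemonic conventions below follow the ISA: the plain forms (lfd, stfs, |
| // ...) are D-form with a signed 16-bit displacement, an 'x' suffix denotes |
| // the register-indexed X-form, and a 'u' suffix updates ra with the |
| // computed effective address. |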
| 1781 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) { |
| 1782 int offset = src.offset(); |
| 1783 Register ra = src.ra(); |
| 1784 DCHECK(is_int16(offset)); |
| 1785 int imm16 = offset & kImm16Mask; |
| 1786 // Could be an X-form instruction with some casting magic. |
| 1787 emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1788 } |
| 1789 |
| 1790 |
| 1791 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) { |
| 1792 int offset = src.offset(); |
| 1793 Register ra = src.ra(); |
| 1794 DCHECK(is_int16(offset)); |
| 1795 int imm16 = offset & kImm16Mask; |
| 1796 // Could be an X-form instruction with some casting magic. |
| 1797 emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1798 } |
| 1799 |
| 1800 |
| 1801 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) { |
| 1802 Register ra = src.ra(); |
| 1803 Register rb = src.rb(); |
| 1804 DCHECK(!ra.is(r0)); |
| 1805 emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1806 LeaveRC); |
| 1807 } |
| 1808 |
| 1809 |
| 1810 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) { |
| 1811 Register ra = src.ra(); |
| 1812 Register rb = src.rb(); |
| 1813 DCHECK(!ra.is(r0)); |
| 1814 emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1815 LeaveRC); |
| 1816 } |
| 1817 |
| 1818 |
| 1819 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) { |
| 1820 int offset = src.offset(); |
| 1821 Register ra = src.ra(); |
| 1822 DCHECK(is_int16(offset)); |
| 1823 DCHECK(!ra.is(r0)); |
| 1824 int imm16 = offset & kImm16Mask; |
| 1825 // Could be an X-form instruction with some casting magic. |
| 1826 emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1827 } |
| 1828 |
| 1829 |
| 1830 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) { |
| 1831 int offset = src.offset(); |
| 1832 Register ra = src.ra(); |
| 1833 DCHECK(is_int16(offset)); |
| 1834 DCHECK(!ra.is(r0)); |
| 1835 int imm16 = offset & kImm16Mask; |
| 1836 // Could be an X-form instruction with some casting magic. |
| 1837 emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1838 } |
| 1839 |
| 1840 |
| 1841 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) { |
| 1842 Register ra = src.ra(); |
| 1843 Register rb = src.rb(); |
| 1844 DCHECK(!ra.is(r0)); |
| 1845 emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1846 LeaveRC); |
| 1847 } |
| 1848 |
| 1849 |
| 1850 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) { |
| 1851 Register ra = src.ra(); |
| 1852 Register rb = src.rb(); |
| 1853 DCHECK(!ra.is(r0)); |
| 1854 emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1855 LeaveRC); |
| 1856 } |
| 1857 |
| 1858 |
| 1859 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) { |
| 1860 int offset = src.offset(); |
| 1861 Register ra = src.ra(); |
| 1862 DCHECK(is_int16(offset)); |
| 1863 DCHECK(!ra.is(r0)); |
| 1864 int imm16 = offset & kImm16Mask; |
| 1865 // Could be an X-form instruction with some casting magic. |
| 1866 emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1867 } |
| 1868 |
| 1869 |
| 1870 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) { |
| 1871 int offset = src.offset(); |
| 1872 Register ra = src.ra(); |
| 1873 DCHECK(is_int16(offset)); |
| 1874 DCHECK(!ra.is(r0)); |
| 1875 int imm16 = offset & kImm16Mask; |
| 1876 // Could be an X-form instruction with some casting magic. |
| 1877 emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1878 } |
| 1879 |
| 1880 |
| 1881 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) { |
| 1882 Register ra = src.ra(); |
| 1883 Register rb = src.rb(); |
| 1884 DCHECK(!ra.is(r0)); |
| 1885 emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1886 LeaveRC); |
| 1887 } |
| 1888 |
| 1889 |
| 1890 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) { |
| 1891 Register ra = src.ra(); |
| 1892 Register rb = src.rb(); |
| 1893 DCHECK(!ra.is(r0)); |
| 1894 emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1895 LeaveRC); |
| 1896 } |
| 1897 |
| 1898 |
| 1899 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) { |
| 1900 int offset = src.offset(); |
| 1901 Register ra = src.ra(); |
| 1902 DCHECK(is_int16(offset)); |
| 1903 DCHECK(!ra.is(r0)); |
| 1904 int imm16 = offset & kImm16Mask; |
| 1905 // Could be an X-form instruction with some casting magic. |
| 1906 emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1907 } |
| 1908 |
| 1909 |
| 1910 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) { |
| 1911 int offset = src.offset(); |
| 1912 Register ra = src.ra(); |
| 1913 DCHECK(is_int16(offset)); |
| 1914 DCHECK(!ra.is(r0)); |
| 1915 int imm16 = offset & kImm16Mask; |
| 1916 // Could be an X-form instruction with some casting magic. |
| 1917 emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1918 } |
| 1919 |
| 1920 |
| 1921 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) { |
| 1922 Register ra = src.ra(); |
| 1923 Register rb = src.rb(); |
| 1924 DCHECK(!ra.is(r0)); |
| 1925 emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1926 LeaveRC); |
| 1927 } |
| 1928 |
| 1929 |
| 1930 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) { |
| 1931 Register ra = src.ra(); |
| 1932 Register rb = src.rb(); |
| 1933 DCHECK(!ra.is(r0)); |
| 1934 emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1935 LeaveRC); |
| 1936 } |
| 1937 |
| 1938 |
| 1939 void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1940 const DoubleRegister frb, RCBit rc) { |
| 1941 a_form(EXT4 | FSUB, frt, fra, frb, rc); |
| 1942 } |
| 1943 |
| 1944 |
| 1945 void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1946 const DoubleRegister frb, RCBit rc) { |
| 1947 a_form(EXT4 | FADD, frt, fra, frb, rc); |
| 1948 } |
| 1949 |
| 1950 |
| 1951 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra, |
| 1952 const DoubleRegister frc, RCBit rc) { |
| 1953 emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 | |
| 1954 rc); |
| 1955 } |
| 1956 |
| 1957 |
| 1958 void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra, |
| 1959 const DoubleRegister frb, RCBit rc) { |
| 1960 a_form(EXT4 | FDIV, frt, fra, frb, rc); |
| 1961 } |
| 1962 |
| 1963 |
| 1964 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb, |
| 1965 CRegister cr) { |
| 1966 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1967 emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11); |
| 1968 } |
| 1969 |
| 1970 |
| 1971 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb, |
| 1972 RCBit rc) { |
| 1973 emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc); |
| 1974 } |
| 1975 |
| 1976 |
| 1977 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) { |
| 1978 emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11); |
| 1979 } |
| 1980 |
| 1981 |
| 1982 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) { |
| 1983 emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11); |
| 1984 } |
| 1985 |
| 1986 |
| 1987 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) { |
| 1988 emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11); |
| 1989 } |
| 1990 |
| 1991 |
| 1992 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb, |
| 1993 RCBit rc) { |
| 1994 emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc); |
| 1995 } |
| 1996 |
| 1997 |
| 1998 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb, |
| 1999 RCBit rc) { |
| 2000 emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc); |
| 2001 } |
| 2002 |
| 2003 |
| 2004 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb, |
| 2005 RCBit rc) { |
| 2006 emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc); |
| 2007 } |
| 2008 |
| 2009 |
| 2010 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb, |
| 2011 RCBit rc) { |
| 2012 emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc); |
| 2013 } |
| 2014 |
| 2015 |
| 2016 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 2017 const DoubleRegister frc, const DoubleRegister frb, |
| 2018 RCBit rc) { |
| 2019 emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2020 frc.code() * B6 | rc); |
| 2021 } |
| 2022 |
| 2023 |
| 2024 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb, |
| 2025 RCBit rc) { |
| 2026 emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc); |
| 2027 } |
| 2028 |
| 2029 |
| 2030 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { |
| 2031 emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc); |
| 2032 } |
| 2033 |
| 2034 |
| 2035 void Assembler::mffs(const DoubleRegister frt, RCBit rc) { |
| 2036 emit(EXT4 | MFFS | frt.code() * B21 | rc); |
| 2037 } |
| 2038 |
| 2039 |
| 2040 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W, |
| 2041 RCBit rc) { |
| 2042 emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc); |
| 2043 } |
| 2044 |
| 2045 |
| 2046 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb, |
| 2047 RCBit rc) { |
| 2048 emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc); |
| 2049 } |
| 2050 |
| 2051 |
| 2052 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb, |
| 2053 RCBit rc) { |
| 2054 emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc); |
| 2055 } |
| 2056 |
| 2057 |
| 2058 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 2059 const DoubleRegister frc, const DoubleRegister frb, |
| 2060 RCBit rc) { |
| 2061 emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2062 frc.code() * B6 | rc); |
| 2063 } |
| 2064 |
| 2065 |
| 2066 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 2067 const DoubleRegister frc, const DoubleRegister frb, |
| 2068 RCBit rc) { |
| 2069 emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2070 frc.code() * B6 | rc); |
| 2071 } |
| 2072 |
| 2073 |
| 2074 // Pseudo instructions. |
| 2075 void Assembler::nop(int type) { |
| 2076 Register reg = r0; |
| 2077 switch (type) { |
| 2078 case NON_MARKING_NOP: |
| 2079 reg = r0; |
| 2080 break; |
| 2081 case GROUP_ENDING_NOP: |
| 2082 reg = r2; |
| 2083 break; |
| 2084 case DEBUG_BREAK_NOP: |
| 2085 reg = r3; |
| 2086 break; |
| 2087 default: |
| 2088 UNIMPLEMENTED(); |
| 2089 } |
| 2090 |
| 2091 ori(reg, reg, Operand::Zero()); |
| 2092 } |
| 2093 |
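| // The marker nops above assemble to, for example: |
| //   nop(NON_MARKING_NOP)  -> ori r0, r0, 0 |
| //   nop(GROUP_ENDING_NOP) -> ori r2, r2, 0 |
| //   nop(DEBUG_BREAK_NOP)  -> ori r3, r3, 0 |
| // IsNop() below matches exactly these encodings. |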
| 2094 |
| 2095 bool Assembler::IsNop(Instr instr, int type) { |
| 2096 int reg = 0; |
| 2097 switch (type) { |
| 2098 case NON_MARKING_NOP: |
| 2099 reg = 0; |
| 2100 break; |
| 2101 case GROUP_ENDING_NOP: |
| 2102 reg = 2; |
| 2103 break; |
| 2104 case DEBUG_BREAK_NOP: |
| 2105 reg = 3; |
| 2106 break; |
| 2107 default: |
| 2108 UNIMPLEMENTED(); |
| 2109 } |
| 2110 return instr == (ORI | reg * B21 | reg * B16); |
| 2111 } |
| 2112 |
| 2113 |
| 2114 // Debugging. |
| 2115 void Assembler::RecordJSReturn() { |
| 2116 positions_recorder()->WriteRecordedPositions(); |
| 2117 CheckBuffer(); |
| 2118 RecordRelocInfo(RelocInfo::JS_RETURN); |
| 2119 } |
| 2120 |
| 2121 |
| 2122 void Assembler::RecordDebugBreakSlot() { |
| 2123 positions_recorder()->WriteRecordedPositions(); |
| 2124 CheckBuffer(); |
| 2125 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); |
| 2126 } |
| 2127 |
| 2128 |
| 2129 void Assembler::RecordComment(const char* msg) { |
| 2130 if (FLAG_code_comments) { |
| 2131 CheckBuffer(); |
| 2132 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
| 2133 } |
| 2134 } |
| 2135 |
| 2136 |
| 2137 void Assembler::GrowBuffer() { |
| 2138 if (!own_buffer_) FATAL("external code buffer is too small"); |
| 2139 |
| 2140 // Compute new buffer size. |
| 2141 CodeDesc desc; // the new buffer |
| 2142 if (buffer_size_ < 4 * KB) { |
| 2143 desc.buffer_size = 4 * KB; |
| 2144 } else if (buffer_size_ < 1 * MB) { |
| 2145 desc.buffer_size = 2 * buffer_size_; |
| 2146 } else { |
| 2147 desc.buffer_size = buffer_size_ + 1 * MB; |
| 2148 } |
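| // i.e. grow geometrically up to 1 MB (256 KB -> 512 KB -> 1 MB), then |
| // linearly in 1 MB steps (1 MB -> 2 MB -> 3 MB -> ...). |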
| 2149 CHECK_GT(desc.buffer_size, 0); // no overflow |
| 2150 |
| 2151 // Set up new buffer. |
| 2152 desc.buffer = NewArray<byte>(desc.buffer_size); |
| 2153 |
| 2154 desc.instr_size = pc_offset(); |
| 2155 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 2156 |
| 2157 // Copy the data. |
| 2158 intptr_t pc_delta = desc.buffer - buffer_; |
| 2159 intptr_t rc_delta = |
| 2160 (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); |
| 2161 memmove(desc.buffer, buffer_, desc.instr_size); |
| 2162 memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), |
| 2163 desc.reloc_size); |
| 2164 |
| 2165 // Switch buffers. |
| 2166 DeleteArray(buffer_); |
| 2167 buffer_ = desc.buffer; |
| 2168 buffer_size_ = desc.buffer_size; |
| 2169 pc_ += pc_delta; |
| 2170 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
| 2171 reloc_info_writer.last_pc() + pc_delta); |
| 2172 |
| 2173 // None of our relocation types are pc-relative with a target outside the |
| 2174 // code buffer, or pc-absolute with a target inside it, so no emitted |
| 2175 // relocation entries need to be relocated. |
| 2176 |
| 2177 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 2178 // Relocate internal references. |
| 2179 for (RelocIterator it(desc); !it.done(); it.next()) { |
| 2180 RelocInfo::Mode rmode = it.rinfo()->rmode(); |
| 2181 if (rmode == RelocInfo::INTERNAL_REFERENCE) { |
| 2182 RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0); |
| 2183 } |
| 2184 } |
| 2185 #if V8_OOL_CONSTANT_POOL |
| 2186 constant_pool_builder_.Relocate(pc_delta); |
| 2187 #endif |
| 2188 #endif |
| 2189 } |
| 2190 |
| 2191 |
| 2192 void Assembler::db(uint8_t data) { |
| 2193 CheckBuffer(); |
| 2194 *reinterpret_cast<uint8_t*>(pc_) = data; |
| 2195 pc_ += sizeof(uint8_t); |
| 2196 } |
| 2197 |
| 2198 |
| 2199 void Assembler::dd(uint32_t data) { |
| 2200 CheckBuffer(); |
| 2201 *reinterpret_cast<uint32_t*>(pc_) = data; |
| 2202 pc_ += sizeof(uint32_t); |
| 2203 } |
| 2204 |
| 2205 |
| 2206 void Assembler::emit_ptr(uintptr_t data) { |
| 2207 CheckBuffer(); |
| 2208 *reinterpret_cast<uintptr_t*>(pc_) = data; |
| 2209 pc_ += sizeof(uintptr_t); |
| 2210 } |
| 2211 |
| 2212 |
| 2213 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2214 RelocInfo rinfo(pc_, rmode, data, NULL); |
| 2215 RecordRelocInfo(rinfo); |
| 2216 } |
| 2217 |
| 2218 |
| 2219 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { |
| 2220 if (rinfo.rmode() >= RelocInfo::JS_RETURN && |
| 2221 rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) { |
| 2222 // Adjust code for new modes. |
| 2223 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) || |
| 2224 RelocInfo::IsJSReturn(rinfo.rmode()) || |
| 2225 RelocInfo::IsComment(rinfo.rmode()) || |
| 2226 RelocInfo::IsPosition(rinfo.rmode())); |
| 2227 } |
| 2228 if (!RelocInfo::IsNone(rinfo.rmode())) { |
| 2229 // Don't record external references unless the heap will be serialized. |
| 2230 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) { |
| 2231 if (!serializer_enabled() && !emit_debug_code()) { |
| 2232 return; |
| 2233 } |
| 2234 } |
| 2235 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
| 2236 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { |
| 2237 RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(), |
| 2238 RecordedAstId().ToInt(), NULL); |
| 2239 ClearRecordedAstId(); |
| 2240 reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2241 } else { |
| 2242 reloc_info_writer.Write(&rinfo); |
| 2243 } |
| 2244 } |
| 2245 } |
| 2246 |
| 2247 |
| 2248 void Assembler::BlockTrampolinePoolFor(int instructions) { |
| 2249 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
| 2250 } |
| 2251 |
| 2252 |
| 2253 void Assembler::CheckTrampolinePool() { |
| 2254 // Some small sequences of instructions must not be broken up by the |
| 2255 // insertion of a trampoline pool; such sequences are protected by setting |
| 2256 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, |
| 2257 // which are both checked here. Also, recursive calls to CheckTrampolinePool |
| 2258 // are blocked by trampoline_pool_blocked_nesting_. |
| 2259 if ((trampoline_pool_blocked_nesting_ > 0) || |
| 2260 (pc_offset() < no_trampoline_pool_before_)) { |
| 2261 // Emission is currently blocked; make sure we try again as soon as |
| 2262 // possible. |
| 2263 if (trampoline_pool_blocked_nesting_ > 0) { |
| 2264 next_buffer_check_ = pc_offset() + kInstrSize; |
| 2265 } else { |
| 2266 next_buffer_check_ = no_trampoline_pool_before_; |
| 2267 } |
| 2268 return; |
| 2269 } |
| 2270 |
| 2271 DCHECK(!trampoline_emitted_); |
| 2272 DCHECK(unbound_labels_count_ >= 0); |
| 2273 if (unbound_labels_count_ > 0) { |
| 2274 // First we emit jump, then we emit trampoline pool. |
| 2275 { |
| 2276 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 2277 Label after_pool; |
| 2278 b(&after_pool); |
| 2279 |
| 2280 int pool_start = pc_offset(); |
| 2281 for (int i = 0; i < unbound_labels_count_; i++) { |
| 2282 b(&after_pool); |
| 2283 } |
| 2284 bind(&after_pool); |
| 2285 trampoline_ = Trampoline(pool_start, unbound_labels_count_); |
| 2286 |
| 2287 trampoline_emitted_ = true; |
| 2288 // As we are only going to emit the trampoline pool once, we need to |
| 2289 // prevent any further emission. |
| 2290 next_buffer_check_ = kMaxInt; |
| 2291 } |
| 2292 } else { |
| 2293 // The number of branches to unbound labels at this point is zero, so we |
| 2294 // can move the next buffer check to the maximum. |
| 2295 next_buffer_check_ = |
| 2296 pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; |
| 2297 } |
| 2298 return; |
| 2299 } |
| 2300 |
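| // Sketch of the emitted pool: |
| //     b after_pool |
| //   pool_start: |
| //     b after_pool   ; one slot per unbound label; each slot is |
| //     ...            ; retargeted when its label is bound |
| //   after_pool: |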
| 2301 |
| 2302 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { |
| 2303 #if V8_OOL_CONSTANT_POOL |
| 2304 return constant_pool_builder_.New(isolate); |
| 2305 #else |
| 2306 // No out-of-line constant pool support. |
| 2307 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2308 return isolate->factory()->empty_constant_pool_array(); |
| 2309 #endif |
| 2310 } |
| 2311 |
| 2312 |
| 2313 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { |
| 2314 #if V8_OOL_CONSTANT_POOL |
| 2315 constant_pool_builder_.Populate(this, constant_pool); |
| 2316 #else |
| 2317 // No out-of-line constant pool support. |
| 2318 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2319 #endif |
| 2320 } |
| 2321 |
| 2322 |
| 2323 #if V8_OOL_CONSTANT_POOL |
| 2324 ConstantPoolBuilder::ConstantPoolBuilder() |
| 2325 : size_(0), |
| 2326 entries_(), |
| 2327 current_section_(ConstantPoolArray::SMALL_SECTION) {} |
| 2328 |
| 2329 |
| 2330 bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; } |
| 2331 |
| 2332 |
| 2333 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( |
| 2334 RelocInfo::Mode rmode) { |
| 2335 #if V8_TARGET_ARCH_PPC64 |
| 2336 // We don't support 32-bit entries at this time. |
| 2337 if (!RelocInfo::IsGCRelocMode(rmode)) { |
| 2338 return ConstantPoolArray::INT64; |
| 2339 #else |
| 2340 if (rmode == RelocInfo::NONE64) { |
| 2341 return ConstantPoolArray::INT64; |
| 2342 } else if (!RelocInfo::IsGCRelocMode(rmode)) { |
| 2343 return ConstantPoolArray::INT32; |
| 2344 #endif |
| 2345 } else if (RelocInfo::IsCodeTarget(rmode)) { |
| 2346 return ConstantPoolArray::CODE_PTR; |
| 2347 } else { |
| 2348 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); |
| 2349 return ConstantPoolArray::HEAP_PTR; |
| 2350 } |
| 2351 } |
| 2352 |
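| // For example: RelocInfo::NONE64 data is typed INT64, a code target |
| // CODE_PTR, and a tagged object reference (a GC reloc mode) HEAP_PTR; on |
| // 32-bit targets, non-GC data other than NONE64 is typed INT32. |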
| 2353 |
| 2354 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( |
| 2355 Assembler* assm, const RelocInfo& rinfo) { |
| 2356 RelocInfo::Mode rmode = rinfo.rmode(); |
| 2357 DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION && |
| 2358 rmode != RelocInfo::STATEMENT_POSITION && |
| 2359 rmode != RelocInfo::CONST_POOL); |
| 2360 |
| 2361 // Try to merge entries which won't be patched. |
| 2362 int merged_index = -1; |
| 2363 ConstantPoolArray::LayoutSection entry_section = current_section_; |
| 2364 if (RelocInfo::IsNone(rmode) || |
| 2365 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { |
| 2366 size_t i; |
| 2367 std::vector<ConstantPoolEntry>::const_iterator it; |
| 2368 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { |
| 2369 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { |
| 2370 // Merge with found entry. |
| 2371 merged_index = i; |
| 2372 entry_section = entries_[i].section_; |
| 2373 break; |
| 2374 } |
| 2375 } |
| 2376 } |
| 2377 DCHECK(entry_section <= current_section_); |
| 2378 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); |
| 2379 |
| 2380 if (merged_index == -1) { |
| 2381 // Not merged, so update the appropriate count. |
| 2382 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); |
| 2383 } |
| 2384 |
| 2385 // Check if we still have room for another entry in the small section |
| 2386 // given the limitations of the header's layout fields. |
| 2387 if (current_section_ == ConstantPoolArray::SMALL_SECTION) { |
| 2388 size_ = ConstantPoolArray::SizeFor(*small_entries()); |
| 2389 if (!is_uint12(size_)) { |
| 2390 current_section_ = ConstantPoolArray::EXTENDED_SECTION; |
| 2391 } |
| 2392 } else { |
| 2393 size_ = ConstantPoolArray::SizeForExtended(*small_entries(), |
| 2394 *extended_entries()); |
| 2395 } |
| 2396 |
| 2397 return entry_section; |
| 2398 } |
| 2399 |
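| // Example: two loads of the same untracked constant (e.g. RelocInfo::NONE64) |
| // produce two entries, the second carrying the merged_index_ of the first, |
| // so Populate() below assigns both loads the same pool offset. |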
| 2400 |
| 2401 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) { |
| 2402 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2403 entry != entries_.end(); entry++) { |
| 2404 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); |
| 2405 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); |
| 2406 } |
| 2407 } |
| 2408 |
| 2409 |
| 2410 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { |
| 2411 if (IsEmpty()) { |
| 2412 return isolate->factory()->empty_constant_pool_array(); |
| 2413 } else if (extended_entries()->is_empty()) { |
| 2414 return isolate->factory()->NewConstantPoolArray(*small_entries()); |
| 2415 } else { |
| 2416 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION); |
| 2417 return isolate->factory()->NewExtendedConstantPoolArray( |
| 2418 *small_entries(), *extended_entries()); |
| 2419 } |
| 2420 } |
| 2421 |
| 2422 |
| 2423 void ConstantPoolBuilder::Populate(Assembler* assm, |
| 2424 ConstantPoolArray* constant_pool) { |
| 2425 DCHECK_EQ(extended_entries()->is_empty(), |
| 2426 !constant_pool->is_extended_layout()); |
| 2427 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries( |
| 2428 constant_pool, ConstantPoolArray::SMALL_SECTION))); |
| 2429 if (constant_pool->is_extended_layout()) { |
| 2430 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( |
| 2431 constant_pool, ConstantPoolArray::EXTENDED_SECTION))); |
| 2432 } |
| 2433 |
| 2434 // Set up initial offsets. |
| 2435 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS] |
| 2436 [ConstantPoolArray::NUMBER_OF_TYPES]; |
| 2437 for (int section = 0; section <= constant_pool->final_section(); section++) { |
| 2438 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION) |
| 2439 ? small_entries()->total_count() |
| 2440 : 0; |
| 2441 for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) { |
| 2442 ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i); |
| 2443 if (number_of_entries_[section].count_of(type) != 0) { |
| 2444 offsets[section][type] = constant_pool->OffsetOfElementAt( |
| 2445 number_of_entries_[section].base_of(type) + section_start); |
| 2446 } |
| 2447 } |
| 2448 } |
| 2449 |
| 2450 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2451 entry != entries_.end(); entry++) { |
| 2452 RelocInfo rinfo = entry->rinfo_; |
| 2453 RelocInfo::Mode rmode = entry->rinfo_.rmode(); |
| 2454 ConstantPoolArray::Type type = GetConstantPoolType(rmode); |
| 2455 |
| 2456 // Update constant pool if necessary and get the entry's offset. |
| 2457 int offset; |
| 2458 if (entry->merged_index_ == -1) { |
| 2459 offset = offsets[entry->section_][type]; |
| 2460 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type); |
| 2461 if (type == ConstantPoolArray::INT64) { |
| 2462 #if V8_TARGET_ARCH_PPC64 |
| 2463 constant_pool->set_at_offset(offset, rinfo.data()); |
| 2464 #else |
| 2465 constant_pool->set_at_offset(offset, rinfo.data64()); |
| 2466 } else if (type == ConstantPoolArray::INT32) { |
| 2467 constant_pool->set_at_offset(offset, |
| 2468 static_cast<int32_t>(rinfo.data())); |
| 2469 #endif |
| 2470 } else if (type == ConstantPoolArray::CODE_PTR) { |
| 2471 constant_pool->set_at_offset(offset, |
| 2472 reinterpret_cast<Address>(rinfo.data())); |
| 2473 } else { |
| 2474 DCHECK(type == ConstantPoolArray::HEAP_PTR); |
| 2475 constant_pool->set_at_offset(offset, |
| 2476 reinterpret_cast<Object*>(rinfo.data())); |
| 2477 } |
| 2478 offset -= kHeapObjectTag; |
| 2479 entry->merged_index_ = offset; // Stash offset for merged entries. |
| 2480 } else { |
| 2481 DCHECK(entry->merged_index_ < (entry - entries_.begin())); |
| 2482 offset = entries_[entry->merged_index_].merged_index_; |
| 2483 } |
| 2484 |
| 2485 // Patch load instruction with correct offset. |
| 2486 Assembler::SetConstantPoolOffset(rinfo.pc(), offset); |
| 2487 } |
| 2488 } |
| 2489 #endif |
| 2490 } |
| 2491 } // namespace v8::internal |
| 2492 |
| 2493 #endif // V8_TARGET_ARCH_PPC |