OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems or the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license has been |
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2012 the V8 project authors. All rights reserved. |
| 36 |
| 37 // |
| 38 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 39 // |
| 40 |
| 41 #include "src/v8.h" |
| 42 |
| 43 #if V8_TARGET_ARCH_PPC |
| 44 |
| 45 #include "src/base/bits.h" |
| 46 #include "src/base/cpu.h" |
| 47 #include "src/macro-assembler.h" |
| 48 #include "src/ppc/assembler-ppc-inl.h" |
| 49 #include "src/serialize.h" |
| 50 |
| 51 namespace v8 { |
| 52 namespace internal { |
| 53 |
| 54 // Get the CPU features enabled by the build. |
| 55 static unsigned CpuFeaturesImpliedByCompiler() { |
| 56 unsigned answer = 0; |
| 57 return answer; |
| 58 } |
| 59 |
| 60 |
| 61 void CpuFeatures::ProbeImpl(bool cross_compile) { |
| 62 supported_ |= CpuFeaturesImpliedByCompiler(); |
| 63 cache_line_size_ = 128; |
| 64 |
| 65 // Only use statically determined features for cross compile (snapshot). |
| 66 if (cross_compile) return; |
| 67 |
| 68 // Detect whether the frim instruction is supported (POWER5+). |
| 69 // For now we just check for processors we know do not |
| 70 // support it. |
| 71 #ifndef USE_SIMULATOR |
| 72 // Probe for additional features at runtime. |
| 73 base::CPU cpu; |
| 74 #if V8_TARGET_ARCH_PPC64 |
| 75 if (cpu.part() == base::CPU::PPC_POWER8) { |
| 76 supported_ |= (1u << FPR_GPR_MOV); |
| 77 } |
| 78 #endif |
| 79 if (cpu.part() == base::CPU::PPC_POWER6 || |
| 80 cpu.part() == base::CPU::PPC_POWER7 || |
| 81 cpu.part() == base::CPU::PPC_POWER8) { |
| 82 supported_ |= (1u << LWSYNC); |
| 83 } |
| 84 #if V8_OS_LINUX |
| 85 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) { |
| 86 // Assume support |
| 87 supported_ |= (1u << FPU); |
| 88 } |
| 89 if (cpu.cache_line_size() != 0) { |
| 90 cache_line_size_ = cpu.cache_line_size(); |
| 91 } |
| 92 #elif V8_OS_AIX |
| 93 // Assume FP support and the default cache line size. |
| 94 supported_ |= (1u << FPU); |
| 95 #endif |
| 96 #else // Simulator |
| 97 supported_ |= (1u << FPU); |
| 98 supported_ |= (1u << LWSYNC); |
| 99 #if V8_TARGET_ARCH_PPC64 |
| 100 supported_ |= (1u << FPR_GPR_MOV); |
| 101 #endif |
| 102 #endif |
| 103 } |
| 104 |
| 105 |
| 106 void CpuFeatures::PrintTarget() { |
| 107 const char* ppc_arch = NULL; |
| 108 |
| 109 #if V8_TARGET_ARCH_PPC64 |
| 110 ppc_arch = "ppc64"; |
| 111 #else |
| 112 ppc_arch = "ppc"; |
| 113 #endif |
| 114 |
| 115 printf("target %s\n", ppc_arch); |
| 116 } |
| 117 |
| 118 |
| 119 void CpuFeatures::PrintFeatures() { |
| 120 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU)); |
| 121 } |
| 122 |
| 123 |
| 124 Register ToRegister(int num) { |
| 125 DCHECK(num >= 0 && num < kNumRegisters); |
| 126 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7, |
| 127 r8, r9, r10, r11, ip, r13, r14, r15, |
| 128 r16, r17, r18, r19, r20, r21, r22, r23, |
| 129 r24, r25, r26, r27, r28, r29, r30, fp}; |
| 130 return kRegisters[num]; |
| 131 } |
| 132 |
| 133 |
| 134 const char* DoubleRegister::AllocationIndexToString(int index) { |
| 135 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 136 const char* const names[] = { |
| 137 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", |
| 138 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", |
| 139 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; |
| 140 return names[index]; |
| 141 } |
| 142 |
| 143 |
| 144 // ----------------------------------------------------------------------------- |
| 145 // Implementation of RelocInfo |
| 146 |
| 147 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; |
| 148 |
| 149 |
| 150 bool RelocInfo::IsCodedSpecially() { |
| 151 // The deserializer needs to know whether a pointer is specially |
| 152 // coded. Being specially coded on PPC means that it is a lis/ori |
| 153 // instruction sequence or is an out of line constant pool entry, |
| 154 // and these are always the case inside code objects. |
| 155 return true; |
| 156 } |
| 157 |
| 158 |
| 159 bool RelocInfo::IsInConstantPool() { |
| 160 #if V8_OOL_CONSTANT_POOL |
| 161 return Assembler::IsConstantPoolLoadStart(pc_); |
| 162 #else |
| 163 return false; |
| 164 #endif |
| 165 } |
| 166 |
| 167 |
| 168 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
| 169 // Patch the code at the current address with the supplied instructions. |
| 170 Instr* pc = reinterpret_cast<Instr*>(pc_); |
| 171 Instr* instr = reinterpret_cast<Instr*>(instructions); |
| 172 for (int i = 0; i < instruction_count; i++) { |
| 173 *(pc + i) = *(instr + i); |
| 174 } |
| 175 |
| 176 // Indicate that code has changed. |
| 177 CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); |
| 178 } |
| 179 |
| 180 |
| 181 // Patch the code at the current PC with a call to the target address. |
| 182 // Additional guard instructions can be added if required. |
| 183 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { |
| 184 // Patch the code at the current address with a call to the target. |
| 185 UNIMPLEMENTED(); |
| 186 } |
| 187 |
| 188 |
| 189 // ----------------------------------------------------------------------------- |
| 190 // Implementation of Operand and MemOperand |
| 191 // See assembler-ppc-inl.h for inlined constructors |
| 192 |
| 193 Operand::Operand(Handle<Object> handle) { |
| 194 AllowDeferredHandleDereference using_raw_address; |
| 195 rm_ = no_reg; |
| 196 // Verify all Objects referred to by code are NOT in new space. |
| 197 Object* obj = *handle; |
| 198 if (obj->IsHeapObject()) { |
| 199 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
| 200 imm_ = reinterpret_cast<intptr_t>(handle.location()); |
| 201 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
| 202 } else { |
| 203 // no relocation needed |
| 204 imm_ = reinterpret_cast<intptr_t>(obj); |
| 205 rmode_ = kRelocInfo_NONEPTR; |
| 206 } |
| 207 } |
| 208 |
| 209 |
| 210 MemOperand::MemOperand(Register rn, int32_t offset) { |
| 211 ra_ = rn; |
| 212 rb_ = no_reg; |
| 213 offset_ = offset; |
| 214 } |
| 215 |
| 216 |
| 217 MemOperand::MemOperand(Register ra, Register rb) { |
| 218 ra_ = ra; |
| 219 rb_ = rb; |
| 220 offset_ = 0; |
| 221 } |
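| // Illustrative usage (register names are examples only): the two |
| // constructors select between the two PPC addressing modes. |
| //   MemOperand(r3, 8);   // D-form: base register plus 16-bit displacement |
| //   MemOperand(r3, r4);  // X-form: base register plus index register |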
| 222 |
| 223 |
| 224 // ----------------------------------------------------------------------------- |
| 225 // Specific instructions, constants, and masks. |
| 226 |
| 227 // Spare buffer. |
| 228 static const int kMinimalBufferSize = 4 * KB; |
| 229 |
| 230 |
| 231 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 232 : AssemblerBase(isolate, buffer, buffer_size), |
| 233 recorded_ast_id_(TypeFeedbackId::None()), |
| 234 #if V8_OOL_CONSTANT_POOL |
| 235 constant_pool_builder_(), |
| 236 #endif |
| 237 positions_recorder_(this) { |
| 238 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); |
| 239 |
| 240 no_trampoline_pool_before_ = 0; |
| 241 trampoline_pool_blocked_nesting_ = 0; |
| 242 // We leave space (kMaxBlockTrampolineSectionSize) |
| 243 // for BlockTrampolinePoolScope buffer. |
| 244 next_buffer_check_ = |
| 245 FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach - |
| 246 kMaxBlockTrampolineSectionSize; |
| 247 internal_trampoline_exception_ = false; |
| 248 last_bound_pos_ = 0; |
| 249 |
| 250 trampoline_emitted_ = FLAG_force_long_branches; |
| 251 unbound_labels_count_ = 0; |
| 252 |
| 253 #if V8_OOL_CONSTANT_POOL |
| 254 constant_pool_available_ = false; |
| 255 #endif |
| 256 |
| 257 ClearRecordedAstId(); |
| 258 } |
| 259 |
| 260 |
| 261 void Assembler::GetCode(CodeDesc* desc) { |
| 262 // Set up code descriptor. |
| 263 desc->buffer = buffer_; |
| 264 desc->buffer_size = buffer_size_; |
| 265 desc->instr_size = pc_offset(); |
| 266 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 267 desc->origin = this; |
| 268 } |
| 269 |
| 270 |
| 271 void Assembler::Align(int m) { |
| 272 #if V8_TARGET_ARCH_PPC64 |
| 273 DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m)); |
| 274 #else |
| 275 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m)); |
| 276 #endif |
| 277 while ((pc_offset() & (m - 1)) != 0) { |
| 278 nop(); |
| 279 } |
| 280 } |
| 281 |
| 282 |
| 283 void Assembler::CodeTargetAlign() { Align(8); } |
| 284 |
| 285 |
| 286 Condition Assembler::GetCondition(Instr instr) { |
| 287 switch (instr & kCondMask) { |
| 288 case BT: |
| 289 return eq; |
| 290 case BF: |
| 291 return ne; |
| 292 default: |
| 293 UNIMPLEMENTED(); |
| 294 } |
| 295 return al; |
| 296 } |
| 297 |
| 298 |
| 299 bool Assembler::IsLis(Instr instr) { |
| 300 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0); |
| 301 } |
| 302 |
| 303 |
| 304 bool Assembler::IsLi(Instr instr) { |
| 305 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0); |
| 306 } |
| 307 |
| 308 |
| 309 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; } |
| 310 |
| 311 |
| 312 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; } |
| 313 |
| 314 |
| 315 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); } |
| 316 |
| 317 |
| 318 Register Assembler::GetRA(Instr instr) { |
| 319 Register reg; |
| 320 reg.code_ = Instruction::RAValue(instr); |
| 321 return reg; |
| 322 } |
| 323 |
| 324 |
| 325 Register Assembler::GetRB(Instr instr) { |
| 326 Register reg; |
| 327 reg.code_ = Instruction::RBValue(instr); |
| 328 return reg; |
| 329 } |
| 330 |
| 331 |
| 332 #if V8_TARGET_ARCH_PPC64 |
| 333 // This code assumes a FIXED_SEQUENCE for 64-bit loads (lis/ori). |
| 334 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3, |
| 335 Instr instr4, Instr instr5) { |
| 336 // Check the instructions are indeed a five-part load (into r12) |
| 337 // 3d800000 lis r12, 0 |
| 338 // 618c0000 ori r12, r12, 0 |
| 339 // 798c07c6 rldicr r12, r12, 32, 31 |
| 340 // 658c00c3 oris r12, r12, 195 |
| 341 // 618ccd40 ori r12, r12, 52544 |
| 342 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) && |
| 343 (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) && |
| 344 ((instr5 >> 16) == 0x618c)); |
| 345 } |
| 346 #else |
| 347 // This code assumes a FIXED_SEQUENCE for 32-bit loads (lis/ori). |
| 348 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) { |
| 349 // Check the instruction is indeed a two-part load (into r12) |
| 350 // 3d802553 lis r12, 9555 |
| 351 // 618c5000 ori r12, r12, 20480 |
| 352 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c)); |
| 353 } |
| 354 #endif |
| 355 |
| 356 |
| 357 bool Assembler::IsCmpRegister(Instr instr) { |
| 358 return (((instr & kOpcodeMask) == EXT2) && |
| 359 ((instr & kExt2OpcodeMask) == CMP)); |
| 360 } |
| 361 |
| 362 |
| 363 bool Assembler::IsRlwinm(Instr instr) { |
| 364 return ((instr & kOpcodeMask) == RLWINMX); |
| 365 } |
| 366 |
| 367 |
| 368 #if V8_TARGET_ARCH_PPC64 |
| 369 bool Assembler::IsRldicl(Instr instr) { |
| 370 return (((instr & kOpcodeMask) == EXT5) && |
| 371 ((instr & kExt5OpcodeMask) == RLDICL)); |
| 372 } |
| 373 #endif |
| 374 |
| 375 |
| 376 bool Assembler::IsCmpImmediate(Instr instr) { |
| 377 return ((instr & kOpcodeMask) == CMPI); |
| 378 } |
| 379 |
| 380 |
| 381 bool Assembler::IsCrSet(Instr instr) { |
| 382 return (((instr & kOpcodeMask) == EXT1) && |
| 383 ((instr & kExt1OpcodeMask) == CREQV)); |
| 384 } |
| 385 |
| 386 |
| 387 Register Assembler::GetCmpImmediateRegister(Instr instr) { |
| 388 DCHECK(IsCmpImmediate(instr)); |
| 389 return GetRA(instr); |
| 390 } |
| 391 |
| 392 |
| 393 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { |
| 394 DCHECK(IsCmpImmediate(instr)); |
| 395 return instr & kOff16Mask; |
| 396 } |
| 397 |
| 398 |
| 399 // Labels refer to positions in the (to be) generated code. |
| 400 // There are bound, linked, and unused labels. |
| 401 // |
| 402 // Bound labels refer to known positions in the already |
| 403 // generated code. pos() is the position the label refers to. |
| 404 // |
| 405 // Linked labels refer to unknown positions in the code |
| 406 // to be generated; pos() is the position of the last |
| 407 // instruction using the label. |
| 408 |
| 409 |
| 410 // The link chain is terminated by a negative code position (must be aligned) |
| 411 const int kEndOfChain = -4; |
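| // Illustrative sketch of the chain in use (assuming the Label-taking b() |
| // overload declared in assembler-ppc.h): |
| //   Label done; |
| //   b(&done);     // forward reference: emitted as a branch-to-self and |
| //                 // linked into the label's chain |
| //   ... |
| //   bind(&done);  // bind_to() walks the chain, patching each reference |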
| 412 |
| 413 |
| 414 int Assembler::target_at(int pos) { |
| 415 Instr instr = instr_at(pos); |
| 416 // Check which type of branch this is: 16- or 26-bit offset. |
| 417 int opcode = instr & kOpcodeMask; |
| 418 if (BX == opcode) { |
| 419 int imm26 = ((instr & kImm26Mask) << 6) >> 6; |
| 420 imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present |
| 421 if (imm26 == 0) return kEndOfChain; |
| 422 return pos + imm26; |
| 423 } else if (BCX == opcode) { |
| 424 int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask)); |
| 425 imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present |
| 426 if (imm16 == 0) return kEndOfChain; |
| 427 return pos + imm16; |
| 428 } else if ((instr & ~kImm26Mask) == 0) { |
| 429 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 430 if (instr == 0) { |
| 431 return kEndOfChain; |
| 432 } else { |
| 433 int32_t imm26 = SIGN_EXT_IMM26(instr); |
| 434 return (imm26 + pos); |
| 435 } |
| 436 } |
| 437 |
| 438 PPCPORT_UNIMPLEMENTED(); |
| 439 DCHECK(false); |
| 440 return -1; |
| 441 } |
| 442 |
| 443 |
| 444 void Assembler::target_at_put(int pos, int target_pos) { |
| 445 Instr instr = instr_at(pos); |
| 446 int opcode = instr & kOpcodeMask; |
| 447 |
| 448 // Check which type of branch this is: 16- or 26-bit offset. |
| 449 if (BX == opcode) { |
| 450 int imm26 = target_pos - pos; |
| 451 DCHECK((imm26 & (kAAMask | kLKMask)) == 0); |
| 452 instr &= ((~kImm26Mask) | kAAMask | kLKMask); |
| 453 DCHECK(is_int26(imm26)); |
| 454 instr_at_put(pos, instr | (imm26 & kImm26Mask)); |
| 455 return; |
| 456 } else if (BCX == opcode) { |
| 457 int imm16 = target_pos - pos; |
| 458 DCHECK((imm16 & (kAAMask | kLKMask)) == 0); |
| 459 instr &= ((~kImm16Mask) | kAAMask | kLKMask); |
| 460 DCHECK(is_int16(imm16)); |
| 461 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
| 462 return; |
| 463 } else if ((instr & ~kImm26Mask) == 0) { |
| 464 DCHECK(target_pos == kEndOfChain || target_pos >= 0); |
| 465 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 466 // Load the position of the label relative to the generated code object |
| 467 // pointer in a register. |
| 468 |
| 469 Register dst = r3; // we assume r3 for now |
| 470 DCHECK(IsNop(instr_at(pos + kInstrSize))); |
| 471 uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag); |
| 472 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2, |
| 473 CodePatcher::DONT_FLUSH); |
| 474 int target_hi = static_cast<int>(target) >> 16; |
| 475 int target_lo = static_cast<int>(target) & 0xFFFF; |
| 476 |
| 477 patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi))); |
| 478 patcher.masm()->ori(dst, dst, Operand(target_lo)); |
| 479 return; |
| 480 } |
| 481 |
| 482 DCHECK(false); |
| 483 } |
| 484 |
| 485 |
| 486 int Assembler::max_reach_from(int pos) { |
| 487 Instr instr = instr_at(pos); |
| 488 int opcode = instr & kOpcodeMask; |
| 489 |
| 490 // Check which type of branch this is: 16- or 26-bit offset. |
| 491 if (BX == opcode) { |
| 492 return 26; |
| 493 } else if (BCX == opcode) { |
| 494 return 16; |
| 495 } else if ((instr & ~kImm26Mask) == 0) { |
| 496 // Emitted label constant, not part of a branch (regexp PushBacktrack). |
| 497 return 26; |
| 498 } |
| 499 |
| 500 DCHECK(false); |
| 501 return 0; |
| 502 } |
| 503 |
| 504 |
| 505 void Assembler::bind_to(Label* L, int pos) { |
| 506 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position |
| 507 int32_t trampoline_pos = kInvalidSlotPos; |
| 508 if (L->is_linked() && !trampoline_emitted_) { |
| 509 unbound_labels_count_--; |
| 510 next_buffer_check_ += kTrampolineSlotsSize; |
| 511 } |
| 512 |
| 513 while (L->is_linked()) { |
| 514 int fixup_pos = L->pos(); |
| 515 int32_t offset = pos - fixup_pos; |
| 516 int maxReach = max_reach_from(fixup_pos); |
| 517 next(L); // call next before overwriting link with target at fixup_pos |
| 518 if (!is_intn(offset, maxReach)) { |
| 519 if (trampoline_pos == kInvalidSlotPos) { |
| 520 trampoline_pos = get_trampoline_entry(); |
| 521 CHECK(trampoline_pos != kInvalidSlotPos); |
| 522 target_at_put(trampoline_pos, pos); |
| 523 } |
| 524 target_at_put(fixup_pos, trampoline_pos); |
| 525 } else { |
| 526 target_at_put(fixup_pos, pos); |
| 527 } |
| 528 } |
| 529 L->bind_to(pos); |
| 530 |
| 531 // Keep track of the last bound label so we don't eliminate any instructions |
| 532 // before a bound label. |
| 533 if (pos > last_bound_pos_) last_bound_pos_ = pos; |
| 534 } |
| 535 |
| 536 |
| 537 void Assembler::bind(Label* L) { |
| 538 DCHECK(!L->is_bound()); // label can only be bound once |
| 539 bind_to(L, pc_offset()); |
| 540 } |
| 541 |
| 542 |
| 543 void Assembler::next(Label* L) { |
| 544 DCHECK(L->is_linked()); |
| 545 int link = target_at(L->pos()); |
| 546 if (link == kEndOfChain) { |
| 547 L->Unuse(); |
| 548 } else { |
| 549 DCHECK(link >= 0); |
| 550 L->link_to(link); |
| 551 } |
| 552 } |
| 553 |
| 554 |
| 555 bool Assembler::is_near(Label* L, Condition cond) { |
| 556 DCHECK(L->is_bound()); |
| 557 if (!L->is_bound()) return false; |
| 558 |
| 559 int maxReach = ((cond == al) ? 26 : 16); |
| 560 int offset = L->pos() - pc_offset(); |
| 561 |
| 562 return is_intn(offset, maxReach); |
| 563 } |
| 564 |
| 565 |
| 566 void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra, |
| 567 DoubleRegister frb, RCBit r) { |
| 568 emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r); |
| 569 } |
| 570 |
| 571 |
| 572 void Assembler::d_form(Instr instr, Register rt, Register ra, |
| 573 const intptr_t val, bool signed_disp) { |
| 574 if (signed_disp) { |
| 575 if (!is_int16(val)) { |
| 576 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val); |
| 577 } |
| 578 DCHECK(is_int16(val)); |
| 579 } else { |
| 580 if (!is_uint16(val)) { |
| 581 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR |
| 582 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n", |
| 583 val, val, is_uint16(val), kImm16Mask); |
| 584 } |
| 585 DCHECK(is_uint16(val)); |
| 586 } |
| 587 emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val)); |
| 588 } |
| 589 |
| 590 |
| 591 void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb, |
| 592 RCBit r) { |
| 593 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r); |
| 594 } |
| 595 |
| 596 |
| 597 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb, |
| 598 OEBit o, RCBit r) { |
| 599 emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r); |
| 600 } |
| 601 |
| 602 |
| 603 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift, |
| 604 int maskbit, RCBit r) { |
| 605 int sh0_4 = shift & 0x1f; |
| 606 int sh5 = (shift >> 5) & 0x1; |
| 607 int m0_4 = maskbit & 0x1f; |
| 608 int m5 = (maskbit >> 5) & 0x1; |
| 609 |
| 610 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 | |
| 611 m5 * B5 | sh5 * B1 | r); |
| 612 } |
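| // Worked encoding example: rldicr r12, r12, 32, 31 splits shift = 32 into |
| // sh0_4 = 0, sh5 = 1 and maskbit = 31 into m0_4 = 31, m5 = 0, which |
| // produces the 0x798c07c6 word matched by Is64BitLoadIntoR12() above. |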
| 613 |
| 614 |
| 615 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb, |
| 616 int maskbit, RCBit r) { |
| 617 int m0_4 = maskbit & 0x1f; |
| 618 int m5 = (maskbit >> 5) & 0x1; |
| 619 |
| 620 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 | |
| 621 m5 * B5 | r); |
| 622 } |
| 623 |
| 624 |
| 625 // Returns the next free trampoline entry. |
| 626 int32_t Assembler::get_trampoline_entry() { |
| 627 int32_t trampoline_entry = kInvalidSlotPos; |
| 628 |
| 629 if (!internal_trampoline_exception_) { |
| 630 trampoline_entry = trampoline_.take_slot(); |
| 631 |
| 632 if (kInvalidSlotPos == trampoline_entry) { |
| 633 internal_trampoline_exception_ = true; |
| 634 } |
| 635 } |
| 636 return trampoline_entry; |
| 637 } |
| 638 |
| 639 |
| 640 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
| 641 int target_pos; |
| 642 if (L->is_bound()) { |
| 643 target_pos = L->pos(); |
| 644 } else { |
| 645 if (L->is_linked()) { |
| 646 target_pos = L->pos(); // L's link |
| 647 } else { |
| 648 // was: target_pos = kEndOfChain; |
| 649 // However, using branch to self to mark the first reference |
| 650 // should avoid most instances of branch offset overflow. See |
| 651 // target_at() for where this is converted back to kEndOfChain. |
| 652 target_pos = pc_offset(); |
| 653 if (!trampoline_emitted_) { |
| 654 unbound_labels_count_++; |
| 655 next_buffer_check_ -= kTrampolineSlotsSize; |
| 656 } |
| 657 } |
| 658 L->link_to(pc_offset()); |
| 659 } |
| 660 |
| 661 return target_pos - pc_offset(); |
| 662 } |
| 663 |
| 664 |
| 665 // Branch instructions. |
| 666 |
| 667 |
| 668 void Assembler::bclr(BOfield bo, LKBit lk) { |
| 669 positions_recorder()->WriteRecordedPositions(); |
| 670 emit(EXT1 | bo | BCLRX | lk); |
| 671 } |
| 672 |
| 673 |
| 674 void Assembler::bcctr(BOfield bo, LKBit lk) { |
| 675 positions_recorder()->WriteRecordedPositions(); |
| 676 emit(EXT1 | bo | BCCTRX | lk); |
| 677 } |
| 678 |
| 679 |
| 680 // Pseudo op - branch to link register |
| 681 void Assembler::blr() { bclr(BA, LeaveLK); } |
| 682 |
| 683 |
| 684 // Pseudo op - branch to count register -- used for "jump" |
| 685 void Assembler::bctr() { bcctr(BA, LeaveLK); } |
| 686 |
| 687 |
| 688 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) { |
| 689 if (lk == SetLK) { |
| 690 positions_recorder()->WriteRecordedPositions(); |
| 691 } |
| 692 DCHECK(is_int16(branch_offset)); |
| 693 emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk); |
| 694 } |
| 695 |
| 696 |
| 697 void Assembler::b(int branch_offset, LKBit lk) { |
| 698 if (lk == SetLK) { |
| 699 positions_recorder()->WriteRecordedPositions(); |
| 700 } |
| 701 DCHECK((branch_offset & 3) == 0); |
| 702 int imm26 = branch_offset; |
| 703 DCHECK(is_int26(imm26)); |
| 704 // TODO: add AA and LK bits. |
| 705 emit(BX | (imm26 & kImm26Mask) | lk); |
| 706 } |
| 707 |
| 708 |
| 709 void Assembler::xori(Register dst, Register src, const Operand& imm) { |
| 710 d_form(XORI, src, dst, imm.imm_, false); |
| 711 } |
| 712 |
| 713 |
| 714 void Assembler::xoris(Register ra, Register rs, const Operand& imm) { |
| 715 d_form(XORIS, rs, ra, imm.imm_, false); |
| 716 } |
| 717 |
| 718 |
| 719 void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) { |
| 720 x_form(EXT2 | XORX, dst, src1, src2, rc); |
| 721 } |
| 722 |
| 723 |
| 724 void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) { |
| 725 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc); |
| 726 } |
| 727 |
| 728 |
| 729 void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) { |
| 730 x_form(EXT2 | ANDX, ra, rs, rb, rc); |
| 731 } |
| 732 |
| 733 |
| 734 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me, |
| 735 RCBit rc) { |
| 736 sh &= 0x1f; |
| 737 mb &= 0x1f; |
| 738 me &= 0x1f; |
| 739 emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 | |
| 740 me << 1 | rc); |
| 741 } |
| 742 |
| 743 |
| 744 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me, |
| 745 RCBit rc) { |
| 746 mb &= 0x1f; |
| 747 me &= 0x1f; |
| 748 emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 | |
| 749 me << 1 | rc); |
| 750 } |
| 751 |
| 752 |
| 753 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me, |
| 754 RCBit rc) { |
| 755 sh &= 0x1f; |
| 756 mb &= 0x1f; |
| 757 me &= 0x1f; |
| 758 emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 | |
| 759 me << 1 | rc); |
| 760 } |
| 761 |
| 762 |
| 763 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 764 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 765 rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc); |
| 766 } |
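| // Illustrative example: slwi(r3, r4, Operand(8)) emits |
| // rlwinm r3, r4, 8, 0, 23 -- rotate left by 8, keeping mask bits 0..23 and |
| // clearing the low 8 bits that the rotate wrapped around. |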
| 767 |
| 768 |
| 769 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 770 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 771 rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc); |
| 772 } |
| 773 |
| 774 |
| 775 void Assembler::clrrwi(Register dst, Register src, const Operand& val, |
| 776 RCBit rc) { |
| 777 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 778 rlwinm(dst, src, 0, 0, 31 - val.imm_, rc); |
| 779 } |
| 780 |
| 781 |
| 782 void Assembler::clrlwi(Register dst, Register src, const Operand& val, |
| 783 RCBit rc) { |
| 784 DCHECK((32 > val.imm_) && (val.imm_ >= 0)); |
| 785 rlwinm(dst, src, 0, val.imm_, 31, rc); |
| 786 } |
| 787 |
| 788 |
| 789 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) { |
| 790 emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r); |
| 791 } |
| 792 |
| 793 |
| 794 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) { |
| 795 x_form(EXT2 | SRWX, dst, src1, src2, r); |
| 796 } |
| 797 |
| 798 |
| 799 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) { |
| 800 x_form(EXT2 | SLWX, dst, src1, src2, r); |
| 801 } |
| 802 |
| 803 |
| 804 void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) { |
| 805 x_form(EXT2 | SRAW, ra, rs, rb, r); |
| 806 } |
| 807 |
| 808 |
| 809 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) { |
| 810 rlwnm(ra, rs, rb, 0, 31, r); |
| 811 } |
| 812 |
| 813 |
| 814 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) { |
| 815 rlwinm(ra, rs, sh, 0, 31, r); |
| 816 } |
| 817 |
| 818 |
| 819 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) { |
| 820 rlwinm(ra, rs, 32 - sh, 0, 31, r); |
| 821 } |
| 822 |
| 823 |
| 824 void Assembler::subi(Register dst, Register src, const Operand& imm) { |
| 825 addi(dst, src, Operand(-(imm.imm_))); |
| 826 } |
| 827 |
| 828 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o, |
| 829 RCBit r) { |
| 830 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r); |
| 831 } |
| 832 |
| 833 |
| 834 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) { |
| 835 // a special xo_form |
| 836 emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r); |
| 837 } |
| 838 |
| 839 |
| 840 void Assembler::sub(Register dst, Register src1, Register src2, OEBit o, |
| 841 RCBit r) { |
| 842 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r); |
| 843 } |
| 844 |
| 845 |
| 846 void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o, |
| 847 RCBit r) { |
| 848 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r); |
| 849 } |
| 850 |
| 851 |
| 852 void Assembler::subfic(Register dst, Register src, const Operand& imm) { |
| 853 d_form(SUBFIC, dst, src, imm.imm_, true); |
| 854 } |
| 855 |
| 856 |
| 857 void Assembler::add(Register dst, Register src1, Register src2, OEBit o, |
| 858 RCBit r) { |
| 859 xo_form(EXT2 | ADDX, dst, src1, src2, o, r); |
| 860 } |
| 861 |
| 862 |
| 863 // Multiply low word |
| 864 void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o, |
| 865 RCBit r) { |
| 866 xo_form(EXT2 | MULLW, dst, src1, src2, o, r); |
| 867 } |
| 868 |
| 869 |
| 870 // Multiply high word |
| 871 void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o, |
| 872 RCBit r) { |
| 873 xo_form(EXT2 | MULHWX, dst, src1, src2, o, r); |
| 874 } |
| 875 |
| 876 |
| 877 // Divide word |
| 878 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o, |
| 879 RCBit r) { |
| 880 xo_form(EXT2 | DIVW, dst, src1, src2, o, r); |
| 881 } |
| 882 |
| 883 |
| 884 void Assembler::addi(Register dst, Register src, const Operand& imm) { |
| 885 DCHECK(!src.is(r0)); // use li instead to show intent |
| 886 d_form(ADDI, dst, src, imm.imm_, true); |
| 887 } |
| 888 |
| 889 |
| 890 void Assembler::addis(Register dst, Register src, const Operand& imm) { |
| 891 DCHECK(!src.is(r0)); // use lis instead to show intent |
| 892 d_form(ADDIS, dst, src, imm.imm_, true); |
| 893 } |
| 894 |
| 895 |
| 896 void Assembler::addic(Register dst, Register src, const Operand& imm) { |
| 897 d_form(ADDIC, dst, src, imm.imm_, true); |
| 898 } |
| 899 |
| 900 |
| 901 void Assembler::andi(Register ra, Register rs, const Operand& imm) { |
| 902 d_form(ANDIx, rs, ra, imm.imm_, false); |
| 903 } |
| 904 |
| 905 |
| 906 void Assembler::andis(Register ra, Register rs, const Operand& imm) { |
| 907 d_form(ANDISx, rs, ra, imm.imm_, false); |
| 908 } |
| 909 |
| 910 |
| 911 void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) { |
| 912 x_form(EXT2 | NORX, dst, src1, src2, r); |
| 913 } |
| 914 |
| 915 |
| 916 void Assembler::notx(Register dst, Register src, RCBit r) { |
| 917 x_form(EXT2 | NORX, dst, src, src, r); |
| 918 } |
| 919 |
| 920 |
| 921 void Assembler::ori(Register ra, Register rs, const Operand& imm) { |
| 922 d_form(ORI, rs, ra, imm.imm_, false); |
| 923 } |
| 924 |
| 925 |
| 926 void Assembler::oris(Register dst, Register src, const Operand& imm) { |
| 927 d_form(ORIS, src, dst, imm.imm_, false); |
| 928 } |
| 929 |
| 930 |
| 931 void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) { |
| 932 x_form(EXT2 | ORX, dst, src1, src2, rc); |
| 933 } |
| 934 |
| 935 |
| 936 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { |
| 937 intptr_t imm16 = src2.imm_; |
| 938 #if V8_TARGET_ARCH_PPC64 |
| 939 int L = 1; |
| 940 #else |
| 941 int L = 0; |
| 942 #endif |
| 943 DCHECK(is_int16(imm16)); |
| 944 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 945 imm16 &= kImm16Mask; |
| 946 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16); |
| 947 } |
| 948 |
| 949 |
| 950 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { |
| 951 uintptr_t uimm16 = src2.imm_; |
| 952 #if V8_TARGET_ARCH_PPC64 |
| 953 int L = 1; |
| 954 #else |
| 955 int L = 0; |
| 956 #endif |
| 957 DCHECK(is_uint16(uimm16)); |
| 958 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 959 uimm16 &= kImm16Mask; |
| 960 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16); |
| 961 } |
| 962 |
| 963 |
| 964 void Assembler::cmp(Register src1, Register src2, CRegister cr) { |
| 965 #if V8_TARGET_ARCH_PPC64 |
| 966 int L = 1; |
| 967 #else |
| 968 int L = 0; |
| 969 #endif |
| 970 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 971 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 972 src2.code() * B11); |
| 973 } |
| 974 |
| 975 |
| 976 void Assembler::cmpl(Register src1, Register src2, CRegister cr) { |
| 977 #if V8_TARGET_ARCH_PPC64 |
| 978 int L = 1; |
| 979 #else |
| 980 int L = 0; |
| 981 #endif |
| 982 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 983 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 984 src2.code() * B11); |
| 985 } |
| 986 |
| 987 |
| 988 void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) { |
| 989 intptr_t imm16 = src2.imm_; |
| 990 int L = 0; |
| 991 DCHECK(is_int16(imm16)); |
| 992 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 993 imm16 &= kImm16Mask; |
| 994 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16); |
| 995 } |
| 996 |
| 997 |
| 998 void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) { |
| 999 uintptr_t uimm16 = src2.imm_; |
| 1000 int L = 0; |
| 1001 DCHECK(is_uint16(uimm16)); |
| 1002 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1003 uimm16 &= kImm16Mask; |
| 1004 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16); |
| 1005 } |
| 1006 |
| 1007 |
| 1008 void Assembler::cmpw(Register src1, Register src2, CRegister cr) { |
| 1009 int L = 0; |
| 1010 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1011 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 1012 src2.code() * B11); |
| 1013 } |
| 1014 |
| 1015 |
| 1016 void Assembler::cmplw(Register src1, Register src2, CRegister cr) { |
| 1017 int L = 0; |
| 1018 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1019 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 | |
| 1020 src2.code() * B11); |
| 1021 } |
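| // The L field selects comparison width: cmpi/cmpli/cmp/cmpl set L = 1 on |
| // PPC64 to compare all 64 bits, while the w-suffixed variants above force |
| // L = 0 for a 32-bit comparison on either architecture. |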
| 1022 |
| 1023 |
| 1024 // Pseudo op - load immediate |
| 1025 void Assembler::li(Register dst, const Operand& imm) { |
| 1026 d_form(ADDI, dst, r0, imm.imm_, true); |
| 1027 } |
| 1028 |
| 1029 |
| 1030 void Assembler::lis(Register dst, const Operand& imm) { |
| 1031 d_form(ADDIS, dst, r0, imm.imm_, true); |
| 1032 } |
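| // Example: li(r3, Operand(42)) encodes addi r3, r0, 42. With RA = r0 the |
| // hardware reads the operand as the literal value 0 rather than r0's |
| // contents, so this loads the immediate; lis works the same way with a |
| // shifted immediate. |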
| 1033 |
| 1034 |
| 1035 // Pseudo op - move register |
| 1036 void Assembler::mr(Register dst, Register src) { |
| 1037 // actually or(dst, src, src) |
| 1038 orx(dst, src, src); |
| 1039 } |
| 1040 |
| 1041 |
| 1042 void Assembler::lbz(Register dst, const MemOperand& src) { |
| 1043 DCHECK(!src.ra_.is(r0)); |
| 1044 d_form(LBZ, dst, src.ra(), src.offset(), true); |
| 1045 } |
| 1046 |
| 1047 |
| 1048 void Assembler::lbzx(Register rt, const MemOperand& src) { |
| 1049 Register ra = src.ra(); |
| 1050 Register rb = src.rb(); |
| 1051 DCHECK(!ra.is(r0)); |
| 1052 emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1053 LeaveRC); |
| 1054 } |
| 1055 |
| 1056 |
| 1057 void Assembler::lbzux(Register rt, const MemOperand& src) { |
| 1058 Register ra = src.ra(); |
| 1059 Register rb = src.rb(); |
| 1060 DCHECK(!ra.is(r0)); |
| 1061 emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1062 LeaveRC); |
| 1063 } |
| 1064 |
| 1065 |
| 1066 void Assembler::lhz(Register dst, const MemOperand& src) { |
| 1067 DCHECK(!src.ra_.is(r0)); |
| 1068 d_form(LHZ, dst, src.ra(), src.offset(), true); |
| 1069 } |
| 1070 |
| 1071 |
| 1072 void Assembler::lhzx(Register rt, const MemOperand& src) { |
| 1073 Register ra = src.ra(); |
| 1074 Register rb = src.rb(); |
| 1075 DCHECK(!ra.is(r0)); |
| 1076 emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1077 LeaveRC); |
| 1078 } |
| 1079 |
| 1080 |
| 1081 void Assembler::lhzux(Register rt, const MemOperand& src) { |
| 1082 Register ra = src.ra(); |
| 1083 Register rb = src.rb(); |
| 1084 DCHECK(!ra.is(r0)); |
| 1085 emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1086 LeaveRC); |
| 1087 } |
| 1088 |
| 1089 |
| 1090 void Assembler::lwz(Register dst, const MemOperand& src) { |
| 1091 DCHECK(!src.ra_.is(r0)); |
| 1092 d_form(LWZ, dst, src.ra(), src.offset(), true); |
| 1093 } |
| 1094 |
| 1095 |
| 1096 void Assembler::lwzu(Register dst, const MemOperand& src) { |
| 1097 DCHECK(!src.ra_.is(r0)); |
| 1098 d_form(LWZU, dst, src.ra(), src.offset(), true); |
| 1099 } |
| 1100 |
| 1101 |
| 1102 void Assembler::lwzx(Register rt, const MemOperand& src) { |
| 1103 Register ra = src.ra(); |
| 1104 Register rb = src.rb(); |
| 1105 DCHECK(!ra.is(r0)); |
| 1106 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1107 LeaveRC); |
| 1108 } |
| 1109 |
| 1110 |
| 1111 void Assembler::lwzux(Register rt, const MemOperand& src) { |
| 1112 Register ra = src.ra(); |
| 1113 Register rb = src.rb(); |
| 1114 DCHECK(!ra.is(r0)); |
| 1115 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1116 LeaveRC); |
| 1117 } |
| 1118 |
| 1119 |
| 1120 void Assembler::lwa(Register dst, const MemOperand& src) { |
| 1121 #if V8_TARGET_ARCH_PPC64 |
| 1122 int offset = src.offset(); |
| 1123 DCHECK(!src.ra_.is(r0)); |
| 1124 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1125 offset = kImm16Mask & offset; |
| 1126 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2); |
| 1127 #else |
| 1128 lwz(dst, src); |
| 1129 #endif |
| 1130 } |
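| // Note: LD is a DS-form opcode; the low two bits of the displacement field |
| // select the variant (0 = ld, 1 = ldu, 2 = lwa as ORed in above), which is |
| // why the offset must be 4-byte aligned. |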
| 1131 |
| 1132 |
| 1133 void Assembler::stb(Register dst, const MemOperand& src) { |
| 1134 DCHECK(!src.ra_.is(r0)); |
| 1135 d_form(STB, dst, src.ra(), src.offset(), true); |
| 1136 } |
| 1137 |
| 1138 |
| 1139 void Assembler::stbx(Register rs, const MemOperand& src) { |
| 1140 Register ra = src.ra(); |
| 1141 Register rb = src.rb(); |
| 1142 DCHECK(!ra.is(r0)); |
| 1143 emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1144 LeaveRC); |
| 1145 } |
| 1146 |
| 1147 |
| 1148 void Assembler::stbux(Register rs, const MemOperand& src) { |
| 1149 Register ra = src.ra(); |
| 1150 Register rb = src.rb(); |
| 1151 DCHECK(!ra.is(r0)); |
| 1152 emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1153 LeaveRC); |
| 1154 } |
| 1155 |
| 1156 |
| 1157 void Assembler::sth(Register dst, const MemOperand& src) { |
| 1158 DCHECK(!src.ra_.is(r0)); |
| 1159 d_form(STH, dst, src.ra(), src.offset(), true); |
| 1160 } |
| 1161 |
| 1162 |
| 1163 void Assembler::sthx(Register rs, const MemOperand& src) { |
| 1164 Register ra = src.ra(); |
| 1165 Register rb = src.rb(); |
| 1166 DCHECK(!ra.is(r0)); |
| 1167 emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1168 LeaveRC); |
| 1169 } |
| 1170 |
| 1171 |
| 1172 void Assembler::sthux(Register rs, const MemOperand& src) { |
| 1173 Register ra = src.ra(); |
| 1174 Register rb = src.rb(); |
| 1175 DCHECK(!ra.is(r0)); |
| 1176 emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1177 LeaveRC); |
| 1178 } |
| 1179 |
| 1180 |
| 1181 void Assembler::stw(Register dst, const MemOperand& src) { |
| 1182 DCHECK(!src.ra_.is(r0)); |
| 1183 d_form(STW, dst, src.ra(), src.offset(), true); |
| 1184 } |
| 1185 |
| 1186 |
| 1187 void Assembler::stwu(Register dst, const MemOperand& src) { |
| 1188 DCHECK(!src.ra_.is(r0)); |
| 1189 d_form(STWU, dst, src.ra(), src.offset(), true); |
| 1190 } |
| 1191 |
| 1192 |
| 1193 void Assembler::stwx(Register rs, const MemOperand& src) { |
| 1194 Register ra = src.ra(); |
| 1195 Register rb = src.rb(); |
| 1196 DCHECK(!ra.is(r0)); |
| 1197 emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1198 LeaveRC); |
| 1199 } |
| 1200 |
| 1201 |
| 1202 void Assembler::stwux(Register rs, const MemOperand& src) { |
| 1203 Register ra = src.ra(); |
| 1204 Register rb = src.rb(); |
| 1205 DCHECK(!ra.is(r0)); |
| 1206 emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1207 LeaveRC); |
| 1208 } |
| 1209 |
| 1210 |
| 1211 void Assembler::extsb(Register rs, Register ra, RCBit rc) { |
| 1212 emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc); |
| 1213 } |
| 1214 |
| 1215 |
| 1216 void Assembler::extsh(Register rs, Register ra, RCBit rc) { |
| 1217 emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc); |
| 1218 } |
| 1219 |
| 1220 |
| 1221 void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) { |
| 1222 emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r); |
| 1223 } |
| 1224 |
| 1225 |
| 1226 void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) { |
| 1227 x_form(EXT2 | ANDCX, dst, src1, src2, rc); |
| 1228 } |
| 1229 |
| 1230 |
| 1231 #if V8_TARGET_ARCH_PPC64 |
| 1232 // 64-bit specific instructions |
| 1233 void Assembler::ld(Register rd, const MemOperand& src) { |
| 1234 int offset = src.offset(); |
| 1235 DCHECK(!src.ra_.is(r0)); |
| 1236 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1237 offset = kImm16Mask & offset; |
| 1238 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset); |
| 1239 } |
| 1240 |
| 1241 |
| 1242 void Assembler::ldx(Register rd, const MemOperand& src) { |
| 1243 Register ra = src.ra(); |
| 1244 Register rb = src.rb(); |
| 1245 DCHECK(!ra.is(r0)); |
| 1246 emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1247 } |
| 1248 |
| 1249 |
| 1250 void Assembler::ldu(Register rd, const MemOperand& src) { |
| 1251 int offset = src.offset(); |
| 1252 DCHECK(!src.ra_.is(r0)); |
| 1253 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1254 offset = kImm16Mask & offset; |
| 1255 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1); |
| 1256 } |
| 1257 |
| 1258 |
| 1259 void Assembler::ldux(Register rd, const MemOperand& src) { |
| 1260 Register ra = src.ra(); |
| 1261 Register rb = src.rb(); |
| 1262 DCHECK(!ra.is(r0)); |
| 1263 emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1264 } |
| 1265 |
| 1266 |
| 1267 void Assembler::std(Register rs, const MemOperand& src) { |
| 1268 int offset = src.offset(); |
| 1269 DCHECK(!src.ra_.is(r0)); |
| 1270 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1271 offset = kImm16Mask & offset; |
| 1272 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset); |
| 1273 } |
| 1274 |
| 1275 |
| 1276 void Assembler::stdx(Register rs, const MemOperand& src) { |
| 1277 Register ra = src.ra(); |
| 1278 Register rb = src.rb(); |
| 1279 DCHECK(!ra.is(r0)); |
| 1280 emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1281 } |
| 1282 |
| 1283 |
| 1284 void Assembler::stdu(Register rs, const MemOperand& src) { |
| 1285 int offset = src.offset(); |
| 1286 DCHECK(!src.ra_.is(r0)); |
| 1287 DCHECK(!(offset & 3) && is_int16(offset)); |
| 1288 offset = kImm16Mask & offset; |
| 1289 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1); |
| 1290 } |
| 1291 |
| 1292 |
| 1293 void Assembler::stdux(Register rs, const MemOperand& src) { |
| 1294 Register ra = src.ra(); |
| 1295 Register rb = src.rb(); |
| 1296 DCHECK(!ra.is(r0)); |
| 1297 emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11); |
| 1298 } |
| 1299 |
| 1300 |
| 1301 void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1302 md_form(EXT5 | RLDIC, ra, rs, sh, mb, r); |
| 1303 } |
| 1304 |
| 1305 |
| 1306 void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1307 md_form(EXT5 | RLDICL, ra, rs, sh, mb, r); |
| 1308 } |
| 1309 |
| 1310 |
| 1311 void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) { |
| 1312 mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r); |
| 1313 } |
| 1314 |
| 1315 |
| 1316 void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) { |
| 1317 md_form(EXT5 | RLDICR, ra, rs, sh, me, r); |
| 1318 } |
| 1319 |
| 1320 |
| 1321 void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 1322 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1323 rldicr(dst, src, val.imm_, 63 - val.imm_, rc); |
| 1324 } |
| 1325 |
| 1326 |
| 1327 void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) { |
| 1328 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1329 rldicl(dst, src, 64 - val.imm_, val.imm_, rc); |
| 1330 } |
| 1331 |
| 1332 |
| 1333 void Assembler::clrrdi(Register dst, Register src, const Operand& val, |
| 1334 RCBit rc) { |
| 1335 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1336 rldicr(dst, src, 0, 63 - val.imm_, rc); |
| 1337 } |
| 1338 |
| 1339 |
| 1340 void Assembler::clrldi(Register dst, Register src, const Operand& val, |
| 1341 RCBit rc) { |
| 1342 DCHECK((64 > val.imm_) && (val.imm_ >= 0)); |
| 1343 rldicl(dst, src, 0, val.imm_, rc); |
| 1344 } |
| 1345 |
| 1346 |
| 1347 void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) { |
| 1348 md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r); |
| 1349 } |
| 1350 |
| 1351 |
| 1352 void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) { |
| 1353 int sh0_4 = sh & 0x1f; |
| 1354 int sh5 = (sh >> 5) & 0x1; |
| 1355 |
| 1356 emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | |
| 1357 sh5 * B1 | r); |
| 1358 } |
| 1359 |
| 1360 |
| 1361 void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) { |
| 1362 x_form(EXT2 | SRDX, dst, src1, src2, r); |
| 1363 } |
| 1364 |
| 1365 |
| 1366 void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) { |
| 1367 x_form(EXT2 | SLDX, dst, src1, src2, r); |
| 1368 } |
| 1369 |
| 1370 |
| 1371 void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) { |
| 1372 x_form(EXT2 | SRAD, ra, rs, rb, r); |
| 1373 } |
| 1374 |
| 1375 |
| 1376 void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) { |
| 1377 rldcl(ra, rs, rb, 0, r); |
| 1378 } |
| 1379 |
| 1380 |
| 1381 void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) { |
| 1382 rldicl(ra, rs, sh, 0, r); |
| 1383 } |
| 1384 |
| 1385 |
| 1386 void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) { |
| 1387 rldicl(ra, rs, 64 - sh, 0, r); |
| 1388 } |
| 1389 |
| 1390 |
| 1391 void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) { |
| 1392 x_form(EXT2 | CNTLZDX, ra, rs, r0, rc); |
| 1393 } |
| 1394 |
| 1395 |
| 1396 void Assembler::extsw(Register rs, Register ra, RCBit rc) { |
| 1397 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc); |
| 1398 } |
| 1399 |
| 1400 |
| 1401 void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o, |
| 1402 RCBit r) { |
| 1403 xo_form(EXT2 | MULLD, dst, src1, src2, o, r); |
| 1404 } |
| 1405 |
| 1406 |
| 1407 void Assembler::divd(Register dst, Register src1, Register src2, OEBit o, |
| 1408 RCBit r) { |
| 1409 xo_form(EXT2 | DIVD, dst, src1, src2, o, r); |
| 1410 } |
| 1411 #endif |
| 1412 |
| 1413 |
| 1414 void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) { |
| 1415 DCHECK(fopcode < fLastFaker); |
| 1416 emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode); |
| 1417 } |
| 1418 |
| 1419 |
| 1420 void Assembler::marker_asm(int mcode) { |
| 1421 if (::v8::internal::FLAG_trace_sim_stubs) { |
| 1422 DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER); |
| 1423 emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode); |
| 1424 } |
| 1425 } |
| 1426 |
| 1427 |
| 1428 // Function descriptor for AIX. |
| 1429 // Code address skips the function descriptor "header". |
| 1430 // TOC and static chain are ignored and set to 0. |
| 1431 void Assembler::function_descriptor() { |
| 1432 DCHECK(pc_offset() == 0); |
| 1433 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); |
| 1434 emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize); |
| 1435 emit_ptr(0); |
| 1436 emit_ptr(0); |
| 1437 } |
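| // The descriptor emitted above is three pointers: [entry, TOC, environment]. |
| // The entry address points just past the descriptor itself (pc_ plus |
| // 3 * kPointerSize); TOC and environment are zeroed per the comment above. |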
| 1438 |
| 1439 |
| 1440 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 1441 void Assembler::RelocateInternalReference(Address pc, intptr_t delta, |
| 1442 Address code_start, |
| 1443 ICacheFlushMode icache_flush_mode) { |
| 1444 DCHECK(delta || code_start); |
| 1445 #if ABI_USES_FUNCTION_DESCRIPTORS |
| 1446 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc); |
| 1447 if (fd[1] == 0 && fd[2] == 0) { |
| 1448 // Function descriptor |
| 1449 if (delta) { |
| 1450 fd[0] += delta; |
| 1451 } else { |
| 1452 fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize; |
| 1453 } |
| 1454 return; |
| 1455 } |
| 1456 #endif |
| 1457 #if V8_OOL_CONSTANT_POOL |
| 1458 // mov for LoadConstantPoolPointerRegister |
| 1459 ConstantPoolArray* constant_pool = NULL; |
| 1460 if (delta) { |
| 1461 code_start = target_address_at(pc, constant_pool) + delta; |
| 1462 } |
| 1463 set_target_address_at(pc, constant_pool, code_start, icache_flush_mode); |
| 1464 #endif |
| 1465 } |
| 1466 |
| 1467 |
| 1468 int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) { |
| 1469 #if ABI_USES_FUNCTION_DESCRIPTORS |
| 1470 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc); |
| 1471 if (fd[1] == 0 && fd[2] == 0) { |
| 1472 // Function descriptor |
| 1473 SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR |
| 1474 "]" |
| 1475 " function descriptor", |
| 1476 fd[0], fd[1], fd[2]); |
| 1477 return kPointerSize * 3; |
| 1478 } |
| 1479 #endif |
| 1480 return 0; |
| 1481 } |
| 1482 #endif |
| 1483 |
| 1484 |
| 1485 int Assembler::instructions_required_for_mov(const Operand& x) const { |
| 1486 #if V8_OOL_CONSTANT_POOL || DEBUG |
| 1487 bool canOptimize = |
| 1488 !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked()); |
| 1489 #endif |
| 1490 #if V8_OOL_CONSTANT_POOL |
| 1491 if (use_constant_pool_for_mov(x, canOptimize)) { |
| 1492 // Current usage guarantees that all constant pool references can |
| 1493 // use the same sequence. |
| 1494 return kMovInstructionsConstantPool; |
| 1495 } |
| 1496 #endif |
| 1497 DCHECK(!canOptimize); |
| 1498 return kMovInstructionsNoConstantPool; |
| 1499 } |
| 1500 |
| 1501 |
| 1502 #if V8_OOL_CONSTANT_POOL |
| 1503 bool Assembler::use_constant_pool_for_mov(const Operand& x, |
| 1504 bool canOptimize) const { |
| 1505 if (!is_constant_pool_available() || is_constant_pool_full()) { |
| 1506 // If there is no constant pool available, we must use a mov |
| 1507 // immediate sequence. |
| 1508 return false; |
| 1509 } |
| 1510 |
| 1511 intptr_t value = x.immediate(); |
| 1512 if (canOptimize && is_int16(value)) { |
| 1513 // Prefer a single-instruction load-immediate. |
| 1514 return false; |
| 1515 } |
| 1516 |
| 1517 return true; |
| 1518 } |
| 1519 |
| 1520 |
| 1521 void Assembler::EnsureSpaceFor(int space_needed) { |
| 1522 if (buffer_space() <= (kGap + space_needed)) { |
| 1523 GrowBuffer(); |
| 1524 } |
| 1525 } |
| 1526 #endif |
| 1527 |
| 1528 |
| 1529 bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
| 1530 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { |
| 1531 if (assembler != NULL && assembler->predictable_code_size()) return true; |
| 1532 return assembler->serializer_enabled(); |
| 1533 } else if (RelocInfo::IsNone(rmode_)) { |
| 1534 return false; |
| 1535 } |
| 1536 return true; |
| 1537 } |
| 1538 |
| 1539 |
| 1540 // Primarily used for loading constants. |
| 1541 // This should really live in macro-assembler, as it |
| 1542 // is really a pseudo instruction. |
| 1543 // Some usages of this intend for a FIXED_SEQUENCE to be used. |
| 1544 // TODO: break this dependency so we can optimize mov() in general |
| 1545 // and only use the generic version when we require a fixed sequence. |
| 1546 void Assembler::mov(Register dst, const Operand& src) { |
| 1547 intptr_t value = src.immediate(); |
| 1548 bool canOptimize; |
| 1549 RelocInfo rinfo(pc_, src.rmode_, value, NULL); |
| 1550 |
| 1551 if (src.must_output_reloc_info(this)) { |
| 1552 RecordRelocInfo(rinfo); |
| 1553 } |
| 1554 |
| 1555 canOptimize = |
| 1556 !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked()); |
| 1557 |
| 1558 #if V8_OOL_CONSTANT_POOL |
| 1559 if (use_constant_pool_for_mov(src, canOptimize)) { |
| 1560 DCHECK(is_constant_pool_available()); |
| 1561 ConstantPoolAddEntry(rinfo); |
| 1562 #if V8_TARGET_ARCH_PPC64 |
| 1563 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1564 // We are forced to use a 2-instruction sequence since the constant |
| 1565 // pool pointer is tagged. |
| 1566 li(dst, Operand::Zero()); |
| 1567 ldx(dst, MemOperand(kConstantPoolRegister, dst)); |
| 1568 #else |
| 1569 lwz(dst, MemOperand(kConstantPoolRegister, 0)); |
| 1570 #endif |
| 1571 return; |
| 1572 } |
| 1573 #endif |
| 1574 |
| 1575 if (canOptimize) { |
| 1576 if (is_int16(value)) { |
| 1577 li(dst, Operand(value)); |
| 1578 } else { |
| 1579 uint16_t u16; |
| 1580 #if V8_TARGET_ARCH_PPC64 |
| 1581 if (is_int32(value)) { |
| 1582 #endif |
| 1583 lis(dst, Operand(value >> 16)); |
| 1584 #if V8_TARGET_ARCH_PPC64 |
| 1585 } else { |
| 1586 if (is_int48(value)) { |
| 1587 li(dst, Operand(value >> 32)); |
| 1588 } else { |
| 1589 lis(dst, Operand(value >> 48)); |
| 1590 u16 = ((value >> 32) & 0xffff); |
| 1591 if (u16) { |
| 1592 ori(dst, dst, Operand(u16)); |
| 1593 } |
| 1594 } |
| 1595 sldi(dst, dst, Operand(32)); |
| 1596 u16 = ((value >> 16) & 0xffff); |
| 1597 if (u16) { |
| 1598 oris(dst, dst, Operand(u16)); |
| 1599 } |
| 1600 } |
| 1601 #endif |
| 1602 u16 = (value & 0xffff); |
| 1603 if (u16) { |
| 1604 ori(dst, dst, Operand(u16)); |
| 1605 } |
| 1606 } |
| 1607 return; |
| 1608 } |
| 1609 |
| 1610 DCHECK(!canOptimize); |
| 1611 |
| 1612 { |
| 1613 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1614 #if V8_TARGET_ARCH_PPC64 |
| 1615 int32_t hi_32 = static_cast<int32_t>(value >> 32); |
| 1616 int32_t lo_32 = static_cast<int32_t>(value); |
| 1617 int hi_word = static_cast<int>(hi_32 >> 16); |
| 1618 int lo_word = static_cast<int>(hi_32 & 0xffff); |
| 1619 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1620 ori(dst, dst, Operand(lo_word)); |
| 1621 sldi(dst, dst, Operand(32)); |
| 1622 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff)); |
| 1623 lo_word = static_cast<int>(lo_32 & 0xffff); |
| 1624 oris(dst, dst, Operand(hi_word)); |
| 1625 ori(dst, dst, Operand(lo_word)); |
| 1626 #else |
| 1627 int hi_word = static_cast<int>(value >> 16); |
| 1628 int lo_word = static_cast<int>(value & 0xffff); |
| 1629 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1630 ori(dst, dst, Operand(lo_word)); |
| 1631 #endif |
| 1632 } |
| 1633 } |
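| // Worked example (32-bit, illustrative): mov(r3, Operand(0x12345678)) with |
| // no relocation info takes the canOptimize path and emits |
| //   lis r3, 0x1234 |
| //   ori r3, r3, 0x5678 |
| // whereas a relocatable operand always gets the fixed-length sequence so |
| // the code patcher can rewrite it in place. |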
| 1634 |
| 1635 |
| 1636 void Assembler::mov_label_offset(Register dst, Label* label) { |
| 1637 if (label->is_bound()) { |
| 1638 int target = label->pos(); |
| 1639 mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag)); |
| 1640 } else { |
| 1641 bool is_linked = label->is_linked(); |
| 1642 // Emit the link to the label in the code stream followed by extra |
| 1643 // nop instructions. |
| 1644 DCHECK(dst.is(r3)); // target_at_put assumes r3 for now |
| 1645 int link = is_linked ? label->pos() - pc_offset() : 0; |
| 1646 label->link_to(pc_offset()); |
| 1647 |
| 1648 if (!is_linked && !trampoline_emitted_) { |
| 1649 unbound_labels_count_++; |
| 1650 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1651 } |
| 1652 |
| 1653 // When the label is bound, these instructions will be patched |
| 1654 // with a 2-instruction mov sequence that will load the |
| 1655 // destination register with the position of the label from the |
| 1656 // beginning of the code. |
| 1657 // |
| 1658 // When the label gets bound: target_at extracts the link and |
| 1659 // target_at_put patches the instructions. |
| 1660 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1661 emit(link); |
| 1662 nop(); |
| 1663 } |
| 1664 } |
| 1665 |
| 1666 |
| 1667 // Special register instructions |
| 1668 void Assembler::crxor(int bt, int ba, int bb) { |
| 1669 emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11); |
| 1670 } |
| 1671 |
| 1672 |
| 1673 void Assembler::creqv(int bt, int ba, int bb) { |
| 1674 emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11); |
| 1675 } |
| 1676 |
| 1677 |
| 1678 void Assembler::mflr(Register dst) { |
| 1679 emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit |
| 1680 } |
| 1681 |
| 1682 |
| 1683 void Assembler::mtlr(Register src) { |
| 1684 emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit |
| 1685 } |
| 1686 |
| 1687 |
| 1688 void Assembler::mtctr(Register src) { |
| 1689 emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit |
| 1690 } |
| 1691 |
| 1692 |
| 1693 void Assembler::mtxer(Register src) { |
| 1694 emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11); |
| 1695 } |
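| // The SPR number is encoded with its two 5-bit halves swapped, so LR |
| // (SPR 8) appears as 8 << 5 = 256, CTR (SPR 9) as 288, and XER (SPR 1) as |
| // 32 in the mtspr/mfspr encodings above. |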
| 1696 |
| 1697 |
| 1698 void Assembler::mcrfs(int bf, int bfa) { |
| 1699 emit(EXT4 | MCRFS | bf * B23 | bfa * B18); |
| 1700 } |
| 1701 |
| 1702 |
| 1703 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); } |
| 1704 |
| 1705 |
| 1706 #if V8_TARGET_ARCH_PPC64 |
| 1707 void Assembler::mffprd(Register dst, DoubleRegister src) { |
| 1708 emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16); |
| 1709 } |
| 1710 |
| 1711 |
| 1712 void Assembler::mffprwz(Register dst, DoubleRegister src) { |
| 1713 emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16); |
| 1714 } |
| 1715 |
| 1716 |
| 1717 void Assembler::mtfprd(DoubleRegister dst, Register src) { |
| 1718 emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16); |
| 1719 } |
| 1720 |
| 1721 |
| 1722 void Assembler::mtfprwz(DoubleRegister dst, Register src) { |
| 1723 emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16); |
| 1724 } |
| 1725 |
| 1726 |
| 1727 void Assembler::mtfprwa(DoubleRegister dst, Register src) { |
| 1728 emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16); |
| 1729 } |
| 1730 #endif |
| 1731 |
| 1732 |
| 1733 // Exception-generating instructions and debugging support. |
| 1734 // Stops with a non-negative code less than kNumOfWatchedStops support |
| 1735 // enabling/disabling and a counter feature. See simulator-ppc.h. |
| 1736 void Assembler::stop(const char* msg, Condition cond, int32_t code, |
| 1737 CRegister cr) { |
| 1738 if (cond != al) { |
| 1739 Label skip; |
| 1740 b(NegateCondition(cond), &skip, cr); |
| 1741 bkpt(0); |
| 1742 bind(&skip); |
| 1743 } else { |
| 1744 bkpt(0); |
| 1745 } |
| 1746 } |
| 1747 |
| 1748 |
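|      // 0x7d821008 decodes as "twge r2, r2" (trap if r2 >= r2, i.e. an |
|      // unconditional trap), which serves as the breakpoint instruction; |
|      // the imm16 argument is currently ignored. |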
| 1749 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); } |
| 1750 |
| 1751 |
| 1752 void Assembler::info(const char* msg, Condition cond, int32_t code, |
| 1753 CRegister cr) { |
| 1754 if (::v8::internal::FLAG_trace_sim_stubs) { |
| 1755 emit(0x7d9ff808); |
| 1756 #if V8_TARGET_ARCH_PPC64 |
| 1757 uint64_t value = reinterpret_cast<uint64_t>(msg); |
| 1758 emit(static_cast<uint32_t>(value >> 32)); |
| 1759 emit(static_cast<uint32_t>(value & 0xFFFFFFFF)); |
| 1760 #else |
| 1761 emit(reinterpret_cast<Instr>(msg)); |
| 1762 #endif |
| 1763 } |
| 1764 } |
| 1765 |
| 1766 |
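|      // Cache and storage synchronization. "sync" is the full (heavyweight) |
|      // memory barrier; "lwsync" (sync with L=1) orders all accesses except |
|      // store-load; "dcbf" flushes a data cache block, and "icbi" followed |
|      // by "isync" invalidates instruction cache after code patching. |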
| 1767 void Assembler::dcbf(Register ra, Register rb) { |
| 1768 emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11); |
| 1769 } |
| 1770 |
| 1771 |
| 1772 void Assembler::sync() { emit(EXT2 | SYNC); } |
| 1773 |
| 1774 |
| 1775 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); } |
| 1776 |
| 1777 |
| 1778 void Assembler::icbi(Register ra, Register rb) { |
| 1779 emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11); |
| 1780 } |
| 1781 |
| 1782 |
| 1783 void Assembler::isync() { emit(EXT1 | ISYNC); } |
| 1784 |
| 1785 |
| 1786 // Floating point support |
| 1787 |
| 1788 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) { |
| 1789 int offset = src.offset(); |
| 1790 Register ra = src.ra(); |
| 1791 DCHECK(is_int16(offset)); |
| 1792 int imm16 = offset & kImm16Mask; |
| 1793 // could be x_form instruction with some casting magic |
| 1794 emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1795 } |
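|      // The D-form loads/stores below take a signed 16-bit displacement in |
|      // the instruction's low halfword. In these encodings ra == r0 selects |
|      // a literal zero base rather than the contents of r0, and the update |
|      // forms (lfdu, stfdu, ...) are invalid instruction forms when |
|      // ra == r0; hence the !ra.is(r0) asserts. |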
| 1796 |
| 1797 |
| 1798 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) { |
| 1799 int offset = src.offset(); |
| 1800 Register ra = src.ra(); |
| 1801 DCHECK(is_int16(offset)); |
| 1802 int imm16 = offset & kImm16Mask; |
| 1803 // could be x_form instruction with some casting magic |
| 1804 emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1805 } |
| 1806 |
| 1807 |
| 1808 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) { |
| 1809 Register ra = src.ra(); |
| 1810 Register rb = src.rb(); |
| 1811 DCHECK(!ra.is(r0)); |
| 1812 emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1813 LeaveRC); |
| 1814 } |
| 1815 |
| 1816 |
| 1817 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) { |
| 1818 Register ra = src.ra(); |
| 1819 Register rb = src.rb(); |
| 1820 DCHECK(!ra.is(r0)); |
| 1821 emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1822 LeaveRC); |
| 1823 } |
| 1824 |
| 1825 |
| 1826 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) { |
| 1827 int offset = src.offset(); |
| 1828 Register ra = src.ra(); |
| 1829 DCHECK(is_int16(offset)); |
| 1830 DCHECK(!ra.is(r0)); |
| 1831 int imm16 = offset & kImm16Mask; |
| 1832 // could be x_form instruction with some casting magic |
| 1833 emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1834 } |
| 1835 |
| 1836 |
| 1837 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) { |
| 1838 int offset = src.offset(); |
| 1839 Register ra = src.ra(); |
| 1840 DCHECK(is_int16(offset)); |
| 1841 DCHECK(!ra.is(r0)); |
| 1842 int imm16 = offset & kImm16Mask; |
| 1843 // could be x_form instruction with some casting magic |
| 1844 emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16); |
| 1845 } |
| 1846 |
| 1847 |
| 1848 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) { |
| 1849 Register ra = src.ra(); |
| 1850 Register rb = src.rb(); |
| 1851 DCHECK(!ra.is(r0)); |
| 1852 emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1853 LeaveRC); |
| 1854 } |
| 1855 |
| 1856 |
| 1857 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) { |
| 1858 Register ra = src.ra(); |
| 1859 Register rb = src.rb(); |
| 1860 DCHECK(!ra.is(r0)); |
| 1861 emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1862 LeaveRC); |
| 1863 } |
| 1864 |
| 1865 |
| 1866 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) { |
| 1867 int offset = src.offset(); |
| 1868 Register ra = src.ra(); |
| 1869 DCHECK(is_int16(offset)); |
| 1870 DCHECK(!ra.is(r0)); |
| 1871 int imm16 = offset & kImm16Mask; |
| 1872 // could be x_form instruction with some casting magic |
| 1873 emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1874 } |
| 1875 |
| 1876 |
| 1877 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) { |
| 1878 int offset = src.offset(); |
| 1879 Register ra = src.ra(); |
| 1880 DCHECK(is_int16(offset)); |
| 1881 DCHECK(!ra.is(r0)); |
| 1882 int imm16 = offset & kImm16Mask; |
| 1883 // could be x_form instruction with some casting magic |
| 1884 emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1885 } |
| 1886 |
| 1887 |
| 1888 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) { |
| 1889 Register ra = src.ra(); |
| 1890 Register rb = src.rb(); |
| 1891 DCHECK(!ra.is(r0)); |
| 1892 emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1893 LeaveRC); |
| 1894 } |
| 1895 |
| 1896 |
| 1897 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) { |
| 1898 Register ra = src.ra(); |
| 1899 Register rb = src.rb(); |
| 1900 DCHECK(!ra.is(r0)); |
| 1901 emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1902 LeaveRC); |
| 1903 } |
| 1904 |
| 1905 |
| 1906 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) { |
| 1907 int offset = src.offset(); |
| 1908 Register ra = src.ra(); |
| 1909 DCHECK(is_int16(offset)); |
| 1910 DCHECK(!ra.is(r0)); |
| 1911 int imm16 = offset & kImm16Mask; |
| 1912 // could be x_form instruction with some casting magic |
| 1913 emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1914 } |
| 1915 |
| 1916 |
| 1917 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) { |
| 1918 int offset = src.offset(); |
| 1919 Register ra = src.ra(); |
| 1920 DCHECK(is_int16(offset)); |
| 1921 DCHECK(!ra.is(r0)); |
| 1922 int imm16 = offset & kImm16Mask; |
| 1923 // could be x_form instruction with some casting magic |
| 1924 emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16); |
| 1925 } |
| 1926 |
| 1927 |
| 1928 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) { |
| 1929 Register ra = src.ra(); |
| 1930 Register rb = src.rb(); |
| 1931 DCHECK(!ra.is(r0)); |
| 1932 emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1933 LeaveRC); |
| 1934 } |
| 1935 |
| 1936 |
| 1937 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) { |
| 1938 Register ra = src.ra(); |
| 1939 Register rb = src.rb(); |
| 1940 DCHECK(!ra.is(r0)); |
| 1941 emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 | |
| 1942 LeaveRC); |
| 1943 } |
| 1944 |
| 1945 |
| 1946 void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1947 const DoubleRegister frb, RCBit rc) { |
| 1948 a_form(EXT4 | FSUB, frt, fra, frb, rc); |
| 1949 } |
| 1950 |
| 1951 |
| 1952 void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1953 const DoubleRegister frb, RCBit rc) { |
| 1954 a_form(EXT4 | FADD, frt, fra, frb, rc); |
| 1955 } |
| 1956 |
| 1957 |
| 1958 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra, |
| 1959 const DoubleRegister frc, RCBit rc) { |
| 1960 emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 | |
| 1961 rc); |
| 1962 } |
| 1963 |
| 1964 |
| 1965 void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra, |
| 1966 const DoubleRegister frb, RCBit rc) { |
| 1967 a_form(EXT4 | FDIV, frt, fra, frb, rc); |
| 1968 } |
| 1969 |
| 1970 |
| 1971 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb, |
| 1972 CRegister cr) { |
| 1973 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 1974 emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11); |
| 1975 } |
| 1976 |
| 1977 |
| 1978 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb, |
| 1979 RCBit rc) { |
| 1980 emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc); |
| 1981 } |
| 1982 |
| 1983 |
| 1984 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) { |
| 1985 emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11); |
| 1986 } |
| 1987 |
| 1988 |
| 1989 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) { |
| 1990 emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11); |
| 1991 } |
| 1992 |
| 1993 |
| 1994 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) { |
| 1995 emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11); |
| 1996 } |
| 1997 |
| 1998 |
| 1999 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb, |
| 2000 RCBit rc) { |
| 2001 emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc); |
| 2002 } |
| 2003 |
| 2004 |
| 2005 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb, |
| 2006 RCBit rc) { |
| 2007 emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc); |
| 2008 } |
| 2009 |
| 2010 |
| 2011 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb, |
| 2012 RCBit rc) { |
| 2013 emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc); |
| 2014 } |
| 2015 |
| 2016 |
| 2017 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb, |
| 2018 RCBit rc) { |
| 2019 emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc); |
| 2020 } |
| 2021 |
| 2022 |
| 2023 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 2024 const DoubleRegister frc, const DoubleRegister frb, |
| 2025 RCBit rc) { |
| 2026 emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2027 frc.code() * B6 | rc); |
| 2028 } |
| 2029 |
| 2030 |
| 2031 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb, |
| 2032 RCBit rc) { |
| 2033 emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc); |
| 2034 } |
| 2035 |
| 2036 |
| 2037 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { |
| 2038 emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc); |
| 2039 } |
| 2040 |
| 2041 |
| 2042 void Assembler::mffs(const DoubleRegister frt, RCBit rc) { |
| 2043 emit(EXT4 | MFFS | frt.code() * B21 | rc); |
| 2044 } |
| 2045 |
| 2046 |
| 2047 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W, |
| 2048 RCBit rc) { |
| 2049 emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc); |
| 2050 } |
| 2051 |
| 2052 |
| 2053 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb, |
| 2054 RCBit rc) { |
| 2055 emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc); |
| 2056 } |
| 2057 |
| 2058 |
| 2059 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb, |
| 2060 RCBit rc) { |
| 2061 emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc); |
| 2062 } |
| 2063 |
| 2064 |
| 2065 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 2066 const DoubleRegister frc, const DoubleRegister frb, |
| 2067 RCBit rc) { |
| 2068 emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2069 frc.code() * B6 | rc); |
| 2070 } |
| 2071 |
| 2072 |
| 2073 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 2074 const DoubleRegister frc, const DoubleRegister frb, |
| 2075 RCBit rc) { |
| 2076 emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | |
| 2077 frc.code() * B6 | rc); |
| 2078 } |
| 2079 |
| 2080 |
| 2081 // Pseudo instructions. |
| 2082 void Assembler::nop(int type) { |
| 2083 Register reg = r0; |
| 2084 switch (type) { |
| 2085 case NON_MARKING_NOP: |
| 2086 reg = r0; |
| 2087 break; |
| 2088 case GROUP_ENDING_NOP: |
| 2089 reg = r2; |
| 2090 break; |
| 2091 case DEBUG_BREAK_NOP: |
| 2092 reg = r3; |
| 2093 break; |
| 2094 default: |
| 2095 UNIMPLEMENTED(); |
| 2096 } |
| 2097 |
| 2098 ori(reg, reg, Operand::Zero()); |
| 2099 } |
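|      // A marking nop is simply "ori reg, reg, 0"; the register chosen (r0, |
|      // r2 or r3) is what lets IsNop() below distinguish the flavours. |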
| 2100 |
| 2101 |
| 2102 bool Assembler::IsNop(Instr instr, int type) { |
| 2103 int reg = 0; |
| 2104 switch (type) { |
| 2105 case NON_MARKING_NOP: |
| 2106 reg = 0; |
| 2107 break; |
| 2108 case GROUP_ENDING_NOP: |
| 2109 reg = 2; |
| 2110 break; |
| 2111 case DEBUG_BREAK_NOP: |
| 2112 reg = 3; |
| 2113 break; |
| 2114 default: |
| 2115 UNIMPLEMENTED(); |
| 2116 } |
| 2117 return instr == (ORI | reg * B21 | reg * B16); |
| 2118 } |
| 2119 |
| 2120 |
| 2121 // Debugging. |
| 2122 void Assembler::RecordJSReturn() { |
| 2123 positions_recorder()->WriteRecordedPositions(); |
| 2124 CheckBuffer(); |
| 2125 RecordRelocInfo(RelocInfo::JS_RETURN); |
| 2126 } |
| 2127 |
| 2128 |
| 2129 void Assembler::RecordDebugBreakSlot() { |
| 2130 positions_recorder()->WriteRecordedPositions(); |
| 2131 CheckBuffer(); |
| 2132 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); |
| 2133 } |
| 2134 |
| 2135 |
| 2136 void Assembler::RecordComment(const char* msg) { |
| 2137 if (FLAG_code_comments) { |
| 2138 CheckBuffer(); |
| 2139 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
| 2140 } |
| 2141 } |
| 2142 |
| 2143 |
| 2144 void Assembler::GrowBuffer() { |
| 2145 if (!own_buffer_) FATAL("external code buffer is too small"); |
| 2146 |
| 2147 // Compute new buffer size. |
| 2148 CodeDesc desc; // the new buffer |
| 2149 if (buffer_size_ < 4 * KB) { |
| 2150 desc.buffer_size = 4 * KB; |
| 2151 } else if (buffer_size_ < 1 * MB) { |
| 2152 desc.buffer_size = 2 * buffer_size_; |
| 2153 } else { |
| 2154 desc.buffer_size = buffer_size_ + 1 * MB; |
| 2155 } |
| 2156 CHECK_GT(desc.buffer_size, 0); // no overflow |
| 2157 |
| 2158 // Set up new buffer. |
| 2159 desc.buffer = NewArray<byte>(desc.buffer_size); |
| 2160 |
| 2161 desc.instr_size = pc_offset(); |
| 2162 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 2163 |
| 2164 // Copy the data. |
| 2165 intptr_t pc_delta = desc.buffer - buffer_; |
| 2166 intptr_t rc_delta = |
| 2167 (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); |
| 2168 memmove(desc.buffer, buffer_, desc.instr_size); |
| 2169 memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), |
| 2170 desc.reloc_size); |
| 2171 |
| 2172 // Switch buffers. |
| 2173 DeleteArray(buffer_); |
| 2174 buffer_ = desc.buffer; |
| 2175 buffer_size_ = desc.buffer_size; |
| 2176 pc_ += pc_delta; |
| 2177 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
| 2178 reloc_info_writer.last_pc() + pc_delta); |
| 2179 |
| 2180 // None of our relocation types are pc-relative pointing outside the code |
| 2181 // buffer, nor pc-absolute pointing inside the code buffer, so there is no |
| 2182 // need to relocate any emitted relocation entries. |
| 2183 |
| 2184 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 2185 // Relocate internal references. |
| 2186 for (RelocIterator it(desc); !it.done(); it.next()) { |
| 2187 RelocInfo::Mode rmode = it.rinfo()->rmode(); |
| 2188 if (rmode == RelocInfo::INTERNAL_REFERENCE) { |
| 2189 RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0); |
| 2190 } |
| 2191 } |
| 2192 #if V8_OOL_CONSTANT_POOL |
| 2193 constant_pool_builder_.Relocate(pc_delta); |
| 2194 #endif |
| 2195 #endif |
| 2196 } |
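|      // Growth policy sketch: a 3KB buffer grows to the 4KB minimum, a |
|      // 256KB buffer doubles to 512KB, and from 1MB onwards growth is |
|      // linear, e.g. 1.5MB grows to 2.5MB. |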
| 2197 |
| 2198 |
| 2199 void Assembler::db(uint8_t data) { |
| 2200 CheckBuffer(); |
| 2201 *reinterpret_cast<uint8_t*>(pc_) = data; |
| 2202 pc_ += sizeof(uint8_t); |
| 2203 } |
| 2204 |
| 2205 |
| 2206 void Assembler::dd(uint32_t data) { |
| 2207 CheckBuffer(); |
| 2208 *reinterpret_cast<uint32_t*>(pc_) = data; |
| 2209 pc_ += sizeof(uint32_t); |
| 2210 } |
| 2211 |
| 2212 |
| 2213 void Assembler::emit_ptr(uintptr_t data) { |
| 2214 CheckBuffer(); |
| 2215 *reinterpret_cast<uintptr_t*>(pc_) = data; |
| 2216 pc_ += sizeof(uintptr_t); |
| 2217 } |
| 2218 |
| 2219 |
| 2220 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2221 RelocInfo rinfo(pc_, rmode, data, NULL); |
| 2222 RecordRelocInfo(rinfo); |
| 2223 } |
| 2224 |
| 2225 |
| 2226 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { |
| 2227 if (rinfo.rmode() >= RelocInfo::JS_RETURN && |
| 2228 rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) { |
| 2229 // Adjust code for new modes. |
| 2230 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) || |
| 2231 RelocInfo::IsJSReturn(rinfo.rmode()) || |
| 2232 RelocInfo::IsComment(rinfo.rmode()) || |
| 2233 RelocInfo::IsPosition(rinfo.rmode())); |
| 2234 } |
| 2235 if (!RelocInfo::IsNone(rinfo.rmode())) { |
| 2236 // Don't record external references unless the heap will be serialized. |
| 2237 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) { |
| 2238 if (!serializer_enabled() && !emit_debug_code()) { |
| 2239 return; |
| 2240 } |
| 2241 } |
| 2242 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
| 2243 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { |
| 2244 RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(), |
| 2245 RecordedAstId().ToInt(), NULL); |
| 2246 ClearRecordedAstId(); |
| 2247 reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2248 } else { |
| 2249 reloc_info_writer.Write(&rinfo); |
| 2250 } |
| 2251 } |
| 2252 } |
| 2253 |
| 2254 |
| 2255 void Assembler::BlockTrampolinePoolFor(int instructions) { |
| 2256 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
| 2257 } |
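|      // E.g. BlockTrampolinePoolFor(2) guarantees that the next two |
|      // instructions are emitted contiguously, with no trampoline pool |
|      // inserted between them. |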
| 2258 |
| 2259 |
| 2260 void Assembler::CheckTrampolinePool() { |
| 2261 // Some small sequences of instructions must not be broken up by the |
| 2262 // insertion of a trampoline pool; such sequences are protected by setting |
| 2263 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, |
| 2264 // which are both checked here. Also, recursive calls to CheckTrampolinePool |
| 2265 // are blocked by trampoline_pool_blocked_nesting_. |
| 2266 if ((trampoline_pool_blocked_nesting_ > 0) || |
| 2267 (pc_offset() < no_trampoline_pool_before_)) { |
| 2268 // Emission is currently blocked; make sure we try again as soon as |
| 2269 // possible. |
| 2270 if (trampoline_pool_blocked_nesting_ > 0) { |
| 2271 next_buffer_check_ = pc_offset() + kInstrSize; |
| 2272 } else { |
| 2273 next_buffer_check_ = no_trampoline_pool_before_; |
| 2274 } |
| 2275 return; |
| 2276 } |
| 2277 |
| 2278 DCHECK(!trampoline_emitted_); |
| 2279 DCHECK(unbound_labels_count_ >= 0); |
| 2280 if (unbound_labels_count_ > 0) { |
| 2281 // First we emit jump, then we emit trampoline pool. |
| 2282 { |
| 2283 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 2284 Label after_pool; |
| 2285 b(&after_pool); |
| 2286 |
| 2287 int pool_start = pc_offset(); |
| 2288 for (int i = 0; i < unbound_labels_count_; i++) { |
| 2289 b(&after_pool); |
| 2290 } |
| 2291 bind(&after_pool); |
| 2292 trampoline_ = Trampoline(pool_start, unbound_labels_count_); |
| 2293 |
| 2294 trampoline_emitted_ = true; |
| 2295 // As we are only going to emit the trampoline once, we need to |
| 2296 // prevent any further emission. |
| 2297 next_buffer_check_ = kMaxInt; |
| 2298 } |
| 2299 } else { |
| 2300 // The number of branches to unbound labels at this point is zero, so |
| 2301 // we can move the next buffer check out to the maximum. |
| 2302 next_buffer_check_ = |
| 2303 pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; |
| 2304 } |
| 2305 return; |
| 2306 } |
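|      // The pool exists because a conditional branch has only a signed |
|      // 16-bit (word-aligned) displacement, i.e. +/-32KB of reach; branches |
|      // to labels further away must bounce through a trampoline slot. |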
| 2307 |
| 2308 |
| 2309 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { |
| 2310 #if V8_OOL_CONSTANT_POOL |
| 2311 return constant_pool_builder_.New(isolate); |
| 2312 #else |
| 2313 // No out-of-line constant pool support. |
| 2314 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2315 return isolate->factory()->empty_constant_pool_array(); |
| 2316 #endif |
| 2317 } |
| 2318 |
| 2319 |
| 2320 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { |
| 2321 #if V8_OOL_CONSTANT_POOL |
| 2322 constant_pool_builder_.Populate(this, constant_pool); |
| 2323 #else |
| 2324 // No out-of-line constant pool support. |
| 2325 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2326 #endif |
| 2327 } |
| 2328 |
| 2329 |
| 2330 #if V8_OOL_CONSTANT_POOL |
| 2331 ConstantPoolBuilder::ConstantPoolBuilder() |
| 2332 : size_(0), |
| 2333 entries_(), |
| 2334 current_section_(ConstantPoolArray::SMALL_SECTION) {} |
| 2335 |
| 2336 |
| 2337 bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; } |
| 2338 |
| 2339 |
| 2340 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( |
| 2341 RelocInfo::Mode rmode) { |
| 2342 #if V8_TARGET_ARCH_PPC64 |
| 2343 // We don't support 32-bit entries at this time. |
| 2344 if (!RelocInfo::IsGCRelocMode(rmode)) { |
| 2345 return ConstantPoolArray::INT64; |
| 2346 #else |
| 2347 if (rmode == RelocInfo::NONE64) { |
| 2348 return ConstantPoolArray::INT64; |
| 2349 } else if (!RelocInfo::IsGCRelocMode(rmode)) { |
| 2350 return ConstantPoolArray::INT32; |
| 2351 #endif |
| 2352 } else if (RelocInfo::IsCodeTarget(rmode)) { |
| 2353 return ConstantPoolArray::CODE_PTR; |
| 2354 } else { |
| 2355 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); |
| 2356 return ConstantPoolArray::HEAP_PTR; |
| 2357 } |
| 2358 } |
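|      // Note on the preprocessor weave above: each build configuration sees |
|      // a single well-formed if/else ladder; the #if arms only swap in the |
|      // leading tests (INT64 only on PPC64, INT64/INT32 on 32-bit). |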
| 2359 |
| 2360 |
| 2361 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( |
| 2362 Assembler* assm, const RelocInfo& rinfo) { |
| 2363 RelocInfo::Mode rmode = rinfo.rmode(); |
| 2364 DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION && |
| 2365 rmode != RelocInfo::STATEMENT_POSITION && |
| 2366 rmode != RelocInfo::CONST_POOL); |
| 2367 |
| 2368 // Try to merge entries which won't be patched. |
| 2369 int merged_index = -1; |
| 2370 ConstantPoolArray::LayoutSection entry_section = current_section_; |
| 2371 if (RelocInfo::IsNone(rmode) || |
| 2372 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { |
| 2373 size_t i; |
| 2374 std::vector<ConstantPoolEntry>::const_iterator it; |
| 2375 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { |
| 2376 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { |
| 2377 // Merge with found entry. |
| 2378 merged_index = i; |
| 2379 entry_section = entries_[i].section_; |
| 2380 break; |
| 2381 } |
| 2382 } |
| 2383 } |
| 2384 DCHECK(entry_section <= current_section_); |
| 2385 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); |
| 2386 |
| 2387 if (merged_index == -1) { |
| 2388 // Not merged, so update the appropriate count. |
| 2389 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); |
| 2390 } |
| 2391 |
| 2392 // Check if we still have room for another entry in the small section |
| 2393 // given the limitations of the header's layout fields. |
| 2394 if (current_section_ == ConstantPoolArray::SMALL_SECTION) { |
| 2395 size_ = ConstantPoolArray::SizeFor(*small_entries()); |
| 2396 if (!is_uint12(size_)) { |
| 2397 current_section_ = ConstantPoolArray::EXTENDED_SECTION; |
| 2398 } |
| 2399 } else { |
| 2400 size_ = ConstantPoolArray::SizeForExtended(*small_entries(), |
| 2401 *extended_entries()); |
| 2402 } |
| 2403 |
| 2404 return entry_section; |
| 2405 } |
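|      // Entries that will never be patched are deduplicated: a merged entry |
|      // records the index of its canonical twin and, in Populate(), reuses |
|      // the pool offset stashed in that entry's merged_index_. |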
| 2406 |
| 2407 |
| 2408 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) { |
| 2409 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2410 entry != entries_.end(); entry++) { |
| 2411 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); |
| 2412 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); |
| 2413 } |
| 2414 } |
| 2415 |
| 2416 |
| 2417 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { |
| 2418 if (IsEmpty()) { |
| 2419 return isolate->factory()->empty_constant_pool_array(); |
| 2420 } else if (extended_entries()->is_empty()) { |
| 2421 return isolate->factory()->NewConstantPoolArray(*small_entries()); |
| 2422 } else { |
| 2423 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION); |
| 2424 return isolate->factory()->NewExtendedConstantPoolArray( |
| 2425 *small_entries(), *extended_entries()); |
| 2426 } |
| 2427 } |
| 2428 |
| 2429 |
| 2430 void ConstantPoolBuilder::Populate(Assembler* assm, |
| 2431 ConstantPoolArray* constant_pool) { |
| 2432 DCHECK_EQ(extended_entries()->is_empty(), |
| 2433 !constant_pool->is_extended_layout()); |
| 2434 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries( |
| 2435 constant_pool, ConstantPoolArray::SMALL_SECTION))); |
| 2436 if (constant_pool->is_extended_layout()) { |
| 2437 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( |
| 2438 constant_pool, ConstantPoolArray::EXTENDED_SECTION))); |
| 2439 } |
| 2440 |
| 2441 // Set up initial offsets. |
| 2442 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS] |
| 2443 [ConstantPoolArray::NUMBER_OF_TYPES]; |
| 2444 for (int section = 0; section <= constant_pool->final_section(); section++) { |
| 2445 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION) |
| 2446 ? small_entries()->total_count() |
| 2447 : 0; |
| 2448 for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) { |
| 2449 ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i); |
| 2450 if (number_of_entries_[section].count_of(type) != 0) { |
| 2451 offsets[section][type] = constant_pool->OffsetOfElementAt( |
| 2452 number_of_entries_[section].base_of(type) + section_start); |
| 2453 } |
| 2454 } |
| 2455 } |
| 2456 |
| 2457 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2458 entry != entries_.end(); entry++) { |
| 2459 RelocInfo rinfo = entry->rinfo_; |
| 2460 RelocInfo::Mode rmode = entry->rinfo_.rmode(); |
| 2461 ConstantPoolArray::Type type = GetConstantPoolType(rmode); |
| 2462 |
| 2463 // Update constant pool if necessary and get the entry's offset. |
| 2464 int offset; |
| 2465 if (entry->merged_index_ == -1) { |
| 2466 offset = offsets[entry->section_][type]; |
| 2467 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type); |
| 2468 if (type == ConstantPoolArray::INT64) { |
| 2469 #if V8_TARGET_ARCH_PPC64 |
| 2470 constant_pool->set_at_offset(offset, rinfo.data()); |
| 2471 #else |
| 2472 constant_pool->set_at_offset(offset, rinfo.data64()); |
| 2473 } else if (type == ConstantPoolArray::INT32) { |
| 2474 constant_pool->set_at_offset(offset, |
| 2475 static_cast<int32_t>(rinfo.data())); |
| 2476 #endif |
| 2477 } else if (type == ConstantPoolArray::CODE_PTR) { |
| 2478 constant_pool->set_at_offset(offset, |
| 2479 reinterpret_cast<Address>(rinfo.data())); |
| 2480 } else { |
| 2481 DCHECK(type == ConstantPoolArray::HEAP_PTR); |
| 2482 constant_pool->set_at_offset(offset, |
| 2483 reinterpret_cast<Object*>(rinfo.data())); |
| 2484 } |
| 2485 offset -= kHeapObjectTag; |
| 2486 entry->merged_index_ = offset; // Stash offset for merged entries. |
| 2487 } else { |
| 2488 DCHECK(entry->merged_index_ < (entry - entries_.begin())); |
| 2489 offset = entries_[entry->merged_index_].merged_index_; |
| 2490 } |
| 2491 |
| 2492 // Patch load instruction with correct offset. |
| 2493 Assembler::SetConstantPoolOffset(rinfo.pc(), offset); |
| 2494 } |
| 2495 } |
| 2496 #endif |
| 2497 } |
| 2498 } // namespace v8::internal |
| 2499 |
| 2500 #endif // V8_TARGET_ARCH_PPC |