OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems or the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license above has been |
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2012 the V8 project authors. All rights reserved. |
| 36 |
| 37 // |
| 38 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 39 // |
| 40 |
| 41 #include "src/v8.h" |
| 42 |
| 43 #if V8_TARGET_ARCH_PPC |
| 44 |
| 45 #include "src/base/cpu.h" |
| 46 #include "src/ppc/assembler-ppc-inl.h" |
| 47 |
| 48 #include "src/macro-assembler.h" |
| 49 #include "src/serialize.h" |
| 50 |
| 51 namespace v8 { |
| 52 namespace internal { |
| 53 |
// Returns the set of CPU features implied purely by compile-time flags.
// No build-time feature defines are honored on PPC yet, so this is empty.
static unsigned CpuFeaturesImpliedByCompiler() {
  return 0;
}
| 59 |
| 60 |
| 61 void CpuFeatures::ProbeImpl(bool cross_compile) { |
| 62 supported_ |= CpuFeaturesImpliedByCompiler(); |
| 63 cache_line_size_ = 128; |
| 64 |
| 65 // Only use statically determined features for cross compile (snapshot). |
| 66 if (cross_compile) return; |
| 67 |
| 68 // Detect whether frim instruction is supported (POWER5+) |
| 69 // For now we will just check for processors we know do not |
| 70 // support it |
| 71 #if V8_OS_LINUX && !defined(USE_SIMULATOR) |
| 72 // Probe for additional features at runtime. |
| 73 base::CPU cpu; |
| 74 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) { |
| 75 // Assume support |
| 76 supported_ |= (1u << FPU); |
| 77 } |
| 78 if (cpu.cache_line_size() != 0) { |
| 79 cache_line_size_ = cpu.cache_line_size(); |
| 80 } |
| 81 #else |
| 82 // Fallback: assume frim is supported -- will implement processor |
| 83 // detection for other PPC platforms if required |
| 84 supported_ |= (1u << FPU); |
| 85 #endif |
| 86 } |
| 87 |
| 88 |
| 89 void CpuFeatures::PrintTarget() { |
| 90 const char* ppc_arch = NULL; |
| 91 |
| 92 #if V8_TARGET_ARCH_PPC64 |
| 93 ppc_arch = "ppc64"; |
| 94 #else |
| 95 ppc_arch = "ppc"; |
| 96 #endif |
| 97 |
| 98 printf("target %s\n", ppc_arch); |
| 99 } |
| 100 |
| 101 |
| 102 void CpuFeatures::PrintFeatures() { |
| 103 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU)); |
| 104 } |
| 105 |
| 106 |
| 107 Register ToRegister(int num) { |
| 108 ASSERT(num >= 0 && num < kNumRegisters); |
| 109 const Register kRegisters[] = { |
| 110 r0, |
| 111 sp, |
| 112 r2, r3, r4, r5, r6, r7, r8, r9, r10, |
| 113 r11, ip, r13, r14, r15, |
| 114 r16, r17, r18, r19, r20, r21, r22, r23, r24, |
| 115 r25, r26, r27, r28, r29, r30, fp |
| 116 }; |
| 117 return kRegisters[num]; |
| 118 } |
| 119 |
| 120 |
| 121 const char* DoubleRegister::AllocationIndexToString(int index) { |
| 122 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 123 const char* const names[] = { |
| 124 "d1", |
| 125 "d2", |
| 126 "d3", |
| 127 "d4", |
| 128 "d5", |
| 129 "d6", |
| 130 "d7", |
| 131 "d8", |
| 132 "d9", |
| 133 "d10", |
| 134 "d11", |
| 135 "d12", |
| 136 }; |
| 137 return names[index]; |
| 138 } |
| 139 |
| 140 |
| 141 // ----------------------------------------------------------------------------- |
| 142 // Implementation of RelocInfo |
| 143 |
| 144 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; |
| 145 |
| 146 |
| 147 bool RelocInfo::IsCodedSpecially() { |
| 148 // The deserializer needs to know whether a pointer is specially |
| 149 // coded. Being specially coded on PPC means that it is a lis/ori |
| 150 // instruction sequence or is an out of line constant pool entry, |
| 151 // and these are always the case inside code objects. |
| 152 return true; |
| 153 } |
| 154 |
| 155 |
| 156 bool RelocInfo::IsInConstantPool() { |
| 157 #if V8_OOL_CONSTANT_POOL |
| 158 return Assembler::IsConstantPoolLoadStart(pc_); |
| 159 #else |
| 160 return false; |
| 161 #endif |
| 162 } |
| 163 |
| 164 |
| 165 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
| 166 // Patch the code at the current address with the supplied instructions. |
| 167 Instr* pc = reinterpret_cast<Instr*>(pc_); |
| 168 Instr* instr = reinterpret_cast<Instr*>(instructions); |
| 169 for (int i = 0; i < instruction_count; i++) { |
| 170 *(pc + i) = *(instr + i); |
| 171 } |
| 172 |
| 173 // Indicate that code has changed. |
| 174 CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize); |
| 175 } |
| 176 |
| 177 |
| 178 // Patch the code at the current PC with a call to the target address. |
| 179 // Additional guard instructions can be added if required. |
| 180 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { |
| 181 // Patch the code at the current address with a call to the target. |
| 182 UNIMPLEMENTED(); |
| 183 } |
| 184 |
| 185 |
| 186 // ----------------------------------------------------------------------------- |
| 187 // Implementation of Operand and MemOperand |
| 188 // See assembler-ppc-inl.h for inlined constructors |
| 189 |
| 190 Operand::Operand(Handle<Object> handle) { |
| 191 AllowDeferredHandleDereference using_raw_address; |
| 192 rm_ = no_reg; |
| 193 // Verify all Objects referred by code are NOT in new space. |
| 194 Object* obj = *handle; |
| 195 if (obj->IsHeapObject()) { |
| 196 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
| 197 imm_ = reinterpret_cast<intptr_t>(handle.location()); |
| 198 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
| 199 } else { |
| 200 // no relocation needed |
| 201 imm_ = reinterpret_cast<intptr_t>(obj); |
| 202 rmode_ = kRelocInfo_NONEPTR; |
| 203 } |
| 204 } |
| 205 |
| 206 |
| 207 MemOperand::MemOperand(Register rn, int32_t offset) { |
| 208 ra_ = rn; |
| 209 rb_ = no_reg; |
| 210 offset_ = offset; |
| 211 } |
| 212 |
| 213 |
| 214 MemOperand::MemOperand(Register ra, Register rb) { |
| 215 ra_ = ra; |
| 216 rb_ = rb; |
| 217 offset_ = 0; |
| 218 } |
| 219 |
| 220 |
| 221 // ----------------------------------------------------------------------------- |
| 222 // Specific instructions, constants, and masks. |
| 223 |
| 224 // Spare buffer. |
| 225 static const int kMinimalBufferSize = 4*KB; |
| 226 |
| 227 |
| 228 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 229 : AssemblerBase(isolate, buffer, buffer_size), |
| 230 recorded_ast_id_(TypeFeedbackId::None()), |
| 231 #if V8_OOL_CONSTANT_POOL |
| 232 constant_pool_builder_(), |
| 233 #endif |
| 234 positions_recorder_(this) { |
| 235 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); |
| 236 |
| 237 no_trampoline_pool_before_ = 0; |
| 238 trampoline_pool_blocked_nesting_ = 0; |
| 239 // We leave space (kMaxBlockTrampolineSectionSize) |
| 240 // for BlockTrampolinePoolScope buffer. |
| 241 next_buffer_check_ = FLAG_force_long_branches |
| 242 ? kMaxInt : kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; |
| 243 internal_trampoline_exception_ = false; |
| 244 last_bound_pos_ = 0; |
| 245 |
| 246 trampoline_emitted_ = FLAG_force_long_branches; |
| 247 unbound_labels_count_ = 0; |
| 248 |
| 249 #if V8_OOL_CONSTANT_POOL |
| 250 constant_pool_available_ = false; |
| 251 #endif |
| 252 |
| 253 ClearRecordedAstId(); |
| 254 } |
| 255 |
| 256 |
| 257 void Assembler::GetCode(CodeDesc* desc) { |
| 258 // Set up code descriptor. |
| 259 desc->buffer = buffer_; |
| 260 desc->buffer_size = buffer_size_; |
| 261 desc->instr_size = pc_offset(); |
| 262 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 263 desc->origin = this; |
| 264 } |
| 265 |
| 266 |
| 267 void Assembler::Align(int m) { |
| 268 ASSERT(m >= 4 && IsPowerOf2(m)); |
| 269 while ((pc_offset() & (m - 1)) != 0) { |
| 270 nop(); |
| 271 } |
| 272 } |
| 273 |
| 274 |
| 275 void Assembler::CodeTargetAlign() { |
| 276 Align(8); |
| 277 } |
| 278 |
| 279 |
| 280 Condition Assembler::GetCondition(Instr instr) { |
| 281 switch (instr & kCondMask) { |
| 282 case BT: |
| 283 return eq; |
| 284 case BF: |
| 285 return ne; |
| 286 default: |
| 287 UNIMPLEMENTED(); |
| 288 } |
| 289 return al; |
| 290 } |
| 291 |
| 292 |
| 293 bool Assembler::IsLis(Instr instr) { |
| 294 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0); |
| 295 } |
| 296 |
| 297 |
| 298 bool Assembler::IsLi(Instr instr) { |
| 299 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0); |
| 300 } |
| 301 |
| 302 |
| 303 bool Assembler::IsAddic(Instr instr) { |
| 304 return (instr & kOpcodeMask) == ADDIC; |
| 305 } |
| 306 |
| 307 |
| 308 bool Assembler::IsOri(Instr instr) { |
| 309 return (instr & kOpcodeMask) == ORI; |
| 310 } |
| 311 |
| 312 |
| 313 bool Assembler::IsBranch(Instr instr) { |
| 314 return ((instr & kOpcodeMask) == BCX); |
| 315 } |
| 316 |
| 317 |
| 318 Register Assembler::GetRA(Instr instr) { |
| 319 Register reg; |
| 320 reg.code_ = Instruction::RAValue(instr); |
| 321 return reg; |
| 322 } |
| 323 |
| 324 |
| 325 Register Assembler::GetRB(Instr instr) { |
| 326 Register reg; |
| 327 reg.code_ = Instruction::RBValue(instr); |
| 328 return reg; |
| 329 } |
| 330 |
| 331 |
| 332 #if V8_TARGET_ARCH_PPC64 |
| 333 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori) |
| 334 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, |
| 335 Instr instr3, Instr instr4, Instr instr5) { |
| 336 // Check the instructions are indeed a five part load (into r12) |
| 337 // 3d800000 lis r12, 0 |
| 338 // 618c0000 ori r12, r12, 0 |
| 339 // 798c07c6 rldicr r12, r12, 32, 31 |
| 340 // 658c00c3 oris r12, r12, 195 |
| 341 // 618ccd40 ori r12, r12, 52544 |
| 342 return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) && |
| 343 (instr3 == 0x798c07c6) && |
| 344 ((instr4 >> 16) == 0x658c) && ((instr5 >> 16) == 0x618c)); |
| 345 } |
| 346 #else |
| 347 // This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori) |
| 348 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) { |
| 349 // Check the instruction is indeed a two part load (into r12) |
| 350 // 3d802553 lis r12, 9555 |
| 351 // 618c5000 ori r12, r12, 20480 |
| 352 return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c)); |
| 353 } |
| 354 #endif |
| 355 |
| 356 |
| 357 bool Assembler::IsCmpRegister(Instr instr) { |
| 358 return (((instr & kOpcodeMask) == EXT2) && |
| 359 ((instr & kExt2OpcodeMask) == CMP)); |
| 360 } |
| 361 |
| 362 |
| 363 bool Assembler::IsRlwinm(Instr instr) { |
| 364 return ((instr & kOpcodeMask) == RLWINMX); |
| 365 } |
| 366 |
| 367 |
| 368 #if V8_TARGET_ARCH_PPC64 |
| 369 bool Assembler::IsRldicl(Instr instr) { |
| 370 return (((instr & kOpcodeMask) == EXT5) && |
| 371 ((instr & kExt5OpcodeMask) == RLDICL)); |
| 372 } |
| 373 #endif |
| 374 |
| 375 |
| 376 bool Assembler::IsCmpImmediate(Instr instr) { |
| 377 return ((instr & kOpcodeMask) == CMPI); |
| 378 } |
| 379 |
| 380 |
| 381 bool Assembler::IsCrSet(Instr instr) { |
| 382 return (((instr & kOpcodeMask) == EXT1) && |
| 383 ((instr & kExt1OpcodeMask) == CREQV)); |
| 384 } |
| 385 |
| 386 |
| 387 Register Assembler::GetCmpImmediateRegister(Instr instr) { |
| 388 ASSERT(IsCmpImmediate(instr)); |
| 389 return GetRA(instr); |
| 390 } |
| 391 |
| 392 |
| 393 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { |
| 394 ASSERT(IsCmpImmediate(instr)); |
| 395 return instr & kOff16Mask; |
| 396 } |
| 397 |
| 398 |
| 399 // Labels refer to positions in the (to be) generated code. |
| 400 // There are bound, linked, and unused labels. |
| 401 // |
| 402 // Bound labels refer to known positions in the already |
| 403 // generated code. pos() is the position the label refers to. |
| 404 // |
| 405 // Linked labels refer to unknown positions in the code |
| 406 // to be generated; pos() is the position of the last |
| 407 // instruction using the label. |
| 408 |
| 409 |
| 410 // The link chain is terminated by a negative code position (must be aligned) |
| 411 const int kEndOfChain = -4; |
| 412 |
| 413 |
| 414 int Assembler::target_at(int pos) { |
| 415 Instr instr = instr_at(pos); |
| 416 // check which type of branch this is 16 or 26 bit offset |
| 417 int opcode = instr & kOpcodeMask; |
| 418 if (BX == opcode) { |
| 419 int imm26 = ((instr & kImm26Mask) << 6) >> 6; |
| 420 imm26 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present |
| 421 if (imm26 == 0) |
| 422 return kEndOfChain; |
| 423 return pos + imm26; |
| 424 } else if (BCX == opcode) { |
| 425 int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask)); |
| 426 imm16 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present |
| 427 if (imm16 == 0) |
| 428 return kEndOfChain; |
| 429 return pos + imm16; |
| 430 } else if ((instr & ~kImm26Mask) == 0) { |
| 431 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 432 if (instr == 0) { |
| 433 return kEndOfChain; |
| 434 } else { |
| 435 int32_t imm26 = SIGN_EXT_IMM26(instr); |
| 436 return (imm26 + pos); |
| 437 } |
| 438 } |
| 439 |
| 440 ASSERT(false); |
| 441 return -1; |
| 442 } |
| 443 |
| 444 |
| 445 void Assembler::target_at_put(int pos, int target_pos) { |
| 446 Instr instr = instr_at(pos); |
| 447 int opcode = instr & kOpcodeMask; |
| 448 |
| 449 // check which type of branch this is 16 or 26 bit offset |
| 450 if (BX == opcode) { |
| 451 int imm26 = target_pos - pos; |
| 452 ASSERT((imm26 & (kAAMask|kLKMask)) == 0); |
| 453 instr &= ((~kImm26Mask)|kAAMask|kLKMask); |
| 454 ASSERT(is_int26(imm26)); |
| 455 instr_at_put(pos, instr | (imm26 & kImm26Mask)); |
| 456 return; |
| 457 } else if (BCX == opcode) { |
| 458 int imm16 = target_pos - pos; |
| 459 ASSERT((imm16 & (kAAMask|kLKMask)) == 0); |
| 460 instr &= ((~kImm16Mask)|kAAMask|kLKMask); |
| 461 ASSERT(is_int16(imm16)); |
| 462 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
| 463 return; |
| 464 } else if ((instr & ~kImm26Mask) == 0) { |
| 465 ASSERT(target_pos == kEndOfChain || target_pos >= 0); |
| 466 // Emitted link to a label, not part of a branch (regexp PushBacktrack). |
| 467 // Load the position of the label relative to the generated code object |
| 468 // pointer in a register. |
| 469 |
| 470 Register dst = r3; // we assume r3 for now |
| 471 ASSERT(IsNop(instr_at(pos + kInstrSize))); |
| 472 uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag); |
| 473 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), |
| 474 2, |
| 475 CodePatcher::DONT_FLUSH); |
| 476 int target_hi = static_cast<int>(target) >> 16; |
| 477 int target_lo = static_cast<int>(target) & 0XFFFF; |
| 478 |
| 479 patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi))); |
| 480 patcher.masm()->ori(dst, dst, Operand(target_lo)); |
| 481 return; |
| 482 } |
| 483 |
| 484 ASSERT(false); |
| 485 } |
| 486 |
| 487 |
| 488 int Assembler::max_reach_from(int pos) { |
| 489 Instr instr = instr_at(pos); |
| 490 int opcode = instr & kOpcodeMask; |
| 491 |
| 492 // check which type of branch this is 16 or 26 bit offset |
| 493 if (BX == opcode) { |
| 494 return 26; |
| 495 } else if (BCX == opcode) { |
| 496 return 16; |
| 497 } else if ((instr & ~kImm26Mask) == 0) { |
| 498 // Emitted label constant, not part of a branch (regexp PushBacktrack). |
| 499 return 26; |
| 500 } |
| 501 |
| 502 ASSERT(false); |
| 503 return 0; |
| 504 } |
| 505 |
| 506 |
| 507 void Assembler::bind_to(Label* L, int pos) { |
| 508 ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position |
| 509 int32_t trampoline_pos = kInvalidSlotPos; |
| 510 if (L->is_linked() && !trampoline_emitted_) { |
| 511 unbound_labels_count_--; |
| 512 next_buffer_check_ += kTrampolineSlotsSize; |
| 513 } |
| 514 |
| 515 while (L->is_linked()) { |
| 516 int fixup_pos = L->pos(); |
| 517 int32_t offset = pos - fixup_pos; |
| 518 int maxReach = max_reach_from(fixup_pos); |
| 519 next(L); // call next before overwriting link with target at fixup_pos |
| 520 if (is_intn(offset, maxReach) == false) { |
| 521 if (trampoline_pos == kInvalidSlotPos) { |
| 522 trampoline_pos = get_trampoline_entry(); |
| 523 CHECK(trampoline_pos != kInvalidSlotPos); |
| 524 target_at_put(trampoline_pos, pos); |
| 525 } |
| 526 target_at_put(fixup_pos, trampoline_pos); |
| 527 } else { |
| 528 target_at_put(fixup_pos, pos); |
| 529 } |
| 530 } |
| 531 L->bind_to(pos); |
| 532 |
| 533 // Keep track of the last bound label so we don't eliminate any instructions |
| 534 // before a bound label. |
| 535 if (pos > last_bound_pos_) |
| 536 last_bound_pos_ = pos; |
| 537 } |
| 538 |
| 539 |
| 540 void Assembler::bind(Label* L) { |
| 541 ASSERT(!L->is_bound()); // label can only be bound once |
| 542 bind_to(L, pc_offset()); |
| 543 } |
| 544 |
| 545 |
| 546 |
| 547 void Assembler::next(Label* L) { |
| 548 ASSERT(L->is_linked()); |
| 549 int link = target_at(L->pos()); |
| 550 if (link == kEndOfChain) { |
| 551 L->Unuse(); |
| 552 } else { |
| 553 ASSERT(link >= 0); |
| 554 L->link_to(link); |
| 555 } |
| 556 } |
| 557 |
| 558 |
| 559 bool Assembler::is_near(Label* L, Condition cond) { |
| 560 ASSERT(L->is_bound()); |
| 561 if (L->is_bound() == false) |
| 562 return false; |
| 563 |
| 564 int maxReach = ((cond == al) ? 26 : 16); |
| 565 int offset = L->pos() - pc_offset(); |
| 566 |
| 567 return is_intn(offset, maxReach); |
| 568 } |
| 569 |
| 570 |
| 571 void Assembler::a_form(Instr instr, |
| 572 DoubleRegister frt, |
| 573 DoubleRegister fra, |
| 574 DoubleRegister frb, |
| 575 RCBit r) { |
| 576 emit(instr | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | r); |
| 577 } |
| 578 |
| 579 |
| 580 void Assembler::d_form(Instr instr, |
| 581 Register rt, |
| 582 Register ra, |
| 583 const intptr_t val, |
| 584 bool signed_disp) { |
| 585 if (signed_disp) { |
| 586 if (!is_int16(val)) { |
| 587 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val); |
| 588 } |
| 589 ASSERT(is_int16(val)); |
| 590 } else { |
| 591 if (!is_uint16(val)) { |
| 592 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR |
| 593 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n", |
| 594 val, val, is_uint16(val), kImm16Mask); |
| 595 } |
| 596 ASSERT(is_uint16(val)); |
| 597 } |
| 598 emit(instr | rt.code()*B21 | ra.code()*B16 | (kImm16Mask & val)); |
| 599 } |
| 600 |
| 601 |
| 602 void Assembler::x_form(Instr instr, |
| 603 Register ra, |
| 604 Register rs, |
| 605 Register rb, |
| 606 RCBit r) { |
| 607 emit(instr | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | r); |
| 608 } |
| 609 |
| 610 |
| 611 void Assembler::xo_form(Instr instr, |
| 612 Register rt, |
| 613 Register ra, |
| 614 Register rb, |
| 615 OEBit o, |
| 616 RCBit r) { |
| 617 emit(instr | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | o | r); |
| 618 } |
| 619 |
| 620 |
| 621 void Assembler::md_form(Instr instr, |
| 622 Register ra, |
| 623 Register rs, |
| 624 int shift, |
| 625 int maskbit, |
| 626 RCBit r) { |
| 627 int sh0_4 = shift & 0x1f; |
| 628 int sh5 = (shift >> 5) & 0x1; |
| 629 int m0_4 = maskbit & 0x1f; |
| 630 int m5 = (maskbit >> 5) & 0x1; |
| 631 |
| 632 emit(instr | rs.code()*B21 | ra.code()*B16 | |
| 633 sh0_4*B11 | m0_4*B6 | m5*B5 | sh5*B1 | r); |
| 634 } |
| 635 |
| 636 |
| 637 void Assembler::mds_form(Instr instr, |
| 638 Register ra, |
| 639 Register rs, |
| 640 Register rb, |
| 641 int maskbit, |
| 642 RCBit r) { |
| 643 int m0_4 = maskbit & 0x1f; |
| 644 int m5 = (maskbit >> 5) & 0x1; |
| 645 |
| 646 emit(instr | rs.code()*B21 | ra.code()*B16 | |
| 647 rb.code()*B11 | m0_4*B6 | m5*B5 | r); |
| 648 } |
| 649 |
| 650 |
| 651 // Returns the next free trampoline entry. |
| 652 int32_t Assembler::get_trampoline_entry() { |
| 653 int32_t trampoline_entry = kInvalidSlotPos; |
| 654 |
| 655 if (!internal_trampoline_exception_) { |
| 656 trampoline_entry = trampoline_.take_slot(); |
| 657 |
| 658 if (kInvalidSlotPos == trampoline_entry) { |
| 659 internal_trampoline_exception_ = true; |
| 660 } |
| 661 } |
| 662 return trampoline_entry; |
| 663 } |
| 664 |
| 665 |
| 666 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
| 667 int target_pos; |
| 668 if (L->is_bound()) { |
| 669 target_pos = L->pos(); |
| 670 } else { |
| 671 if (L->is_linked()) { |
| 672 target_pos = L->pos(); // L's link |
| 673 } else { |
| 674 // was: target_pos = kEndOfChain; |
| 675 // However, using branch to self to mark the first reference |
| 676 // should avoid most instances of branch offset overflow. See |
| 677 // target_at() for where this is converted back to kEndOfChain. |
| 678 target_pos = pc_offset(); |
| 679 if (!trampoline_emitted_) { |
| 680 unbound_labels_count_++; |
| 681 next_buffer_check_ -= kTrampolineSlotsSize; |
| 682 } |
| 683 } |
| 684 L->link_to(pc_offset()); |
| 685 } |
| 686 |
| 687 return target_pos - pc_offset(); |
| 688 } |
| 689 |
| 690 |
| 691 // Branch instructions. |
| 692 |
| 693 |
| 694 void Assembler::bclr(BOfield bo, LKBit lk) { |
| 695 positions_recorder()->WriteRecordedPositions(); |
| 696 emit(EXT1 | bo | BCLRX | lk); |
| 697 } |
| 698 |
| 699 |
| 700 void Assembler::bcctr(BOfield bo, LKBit lk) { |
| 701 positions_recorder()->WriteRecordedPositions(); |
| 702 emit(EXT1 | bo | BCCTRX | lk); |
| 703 } |
| 704 |
| 705 |
| 706 // Pseudo op - branch to link register |
| 707 void Assembler::blr() { |
| 708 bclr(BA, LeaveLK); |
| 709 } |
| 710 |
| 711 |
| 712 // Pseudo op - branch to count register -- used for "jump" |
| 713 void Assembler::bctr() { |
| 714 bcctr(BA, LeaveLK); |
| 715 } |
| 716 |
| 717 |
| 718 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) { |
| 719 if (lk == SetLK) { |
| 720 positions_recorder()->WriteRecordedPositions(); |
| 721 } |
| 722 ASSERT(is_int16(branch_offset)); |
| 723 emit(BCX | bo | condition_bit*B16 | (kImm16Mask & branch_offset) | lk); |
| 724 } |
| 725 |
| 726 |
| 727 void Assembler::b(int branch_offset, LKBit lk) { |
| 728 if (lk == SetLK) { |
| 729 positions_recorder()->WriteRecordedPositions(); |
| 730 } |
| 731 ASSERT((branch_offset & 3) == 0); |
| 732 int imm26 = branch_offset; |
| 733 ASSERT(is_int26(imm26)); |
| 734 // todo add AA and LK bits |
| 735 emit(BX | (imm26 & kImm26Mask) | lk); |
| 736 } |
| 737 |
| 738 |
| 739 void Assembler::xori(Register dst, Register src, const Operand& imm) { |
| 740 d_form(XORI, src, dst, imm.imm_, false); |
| 741 } |
| 742 |
| 743 |
| 744 void Assembler::xoris(Register ra, Register rs, const Operand& imm) { |
| 745 d_form(XORIS, rs, ra, imm.imm_, false); |
| 746 } |
| 747 |
| 748 |
| 749 void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) { |
| 750 x_form(EXT2 | XORX, dst, src1, src2, rc); |
| 751 } |
| 752 |
| 753 |
| 754 void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) { |
| 755 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc); |
| 756 } |
| 757 |
| 758 |
| 759 void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) { |
| 760 x_form(EXT2 | ANDX, ra, rs, rb, rc); |
| 761 } |
| 762 |
| 763 |
| 764 void Assembler::rlwinm(Register ra, Register rs, |
| 765 int sh, int mb, int me, RCBit rc) { |
| 766 sh &= 0x1f; |
| 767 mb &= 0x1f; |
| 768 me &= 0x1f; |
| 769 emit(RLWINMX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); |
| 770 } |
| 771 |
| 772 |
| 773 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me, |
| 774 RCBit rc) { |
| 775 mb &= 0x1f; |
| 776 me &= 0x1f; |
| 777 emit(RLWNMX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | |
| 778 mb*B6 | me << 1 | rc); |
| 779 } |
| 780 |
| 781 |
| 782 void Assembler::rlwimi(Register ra, Register rs, |
| 783 int sh, int mb, int me, RCBit rc) { |
| 784 sh &= 0x1f; |
| 785 mb &= 0x1f; |
| 786 me &= 0x1f; |
| 787 emit(RLWIMIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); |
| 788 } |
| 789 |
| 790 |
| 791 void Assembler::slwi(Register dst, Register src, const Operand& val, |
| 792 RCBit rc) { |
| 793 ASSERT((32 > val.imm_) && (val.imm_ >= 0)); |
| 794 rlwinm(dst, src, val.imm_, 0, 31-val.imm_, rc); |
| 795 } |
| 796 |
| 797 |
| 798 void Assembler::srwi(Register dst, Register src, const Operand& val, |
| 799 RCBit rc) { |
| 800 ASSERT((32 > val.imm_) && (val.imm_ >= 0)); |
| 801 rlwinm(dst, src, 32-val.imm_, val.imm_, 31, rc); |
| 802 } |
| 803 |
| 804 |
| 805 void Assembler::clrrwi(Register dst, Register src, const Operand& val, |
| 806 RCBit rc) { |
| 807 ASSERT((32 > val.imm_) && (val.imm_ >= 0)); |
| 808 rlwinm(dst, src, 0, 0, 31-val.imm_, rc); |
| 809 } |
| 810 |
| 811 |
| 812 void Assembler::clrlwi(Register dst, Register src, const Operand& val, |
| 813 RCBit rc) { |
| 814 ASSERT((32 > val.imm_) && (val.imm_ >= 0)); |
| 815 rlwinm(dst, src, 0, val.imm_, 31, rc); |
| 816 } |
| 817 |
| 818 |
| 819 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) { |
| 820 emit(EXT2 | SRAWIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | r); |
| 821 } |
| 822 |
| 823 |
| 824 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) { |
| 825 x_form(EXT2 | SRWX, dst, src1, src2, r); |
| 826 } |
| 827 |
| 828 |
| 829 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) { |
| 830 x_form(EXT2 | SLWX, dst, src1, src2, r); |
| 831 } |
| 832 |
| 833 |
| 834 void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) { |
| 835 x_form(EXT2 | SRAW, ra, rs, rb, r); |
| 836 } |
| 837 |
| 838 |
| 839 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) { |
| 840 rlwnm(ra, rs, rb, 0, 31, r); |
| 841 } |
| 842 |
| 843 |
| 844 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) { |
| 845 rlwinm(ra, rs, sh, 0, 31, r); |
| 846 } |
| 847 |
| 848 |
| 849 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) { |
| 850 rlwinm(ra, rs, 32 - sh, 0, 31, r); |
| 851 } |
| 852 |
| 853 |
| 854 void Assembler::subi(Register dst, Register src, const Operand& imm) { |
| 855 addi(dst, src, Operand(-(imm.imm_))); |
| 856 } |
| 857 |
| 858 void Assembler::addc(Register dst, Register src1, Register src2, |
| 859 OEBit o, RCBit r) { |
| 860 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r); |
| 861 } |
| 862 |
| 863 |
| 864 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) { |
| 865 // a special xo_form |
| 866 emit(EXT2 | ADDZEX | dst.code()*B21 | src1.code()*B16 | o | r); |
| 867 } |
| 868 |
| 869 |
| 870 void Assembler::sub(Register dst, Register src1, Register src2, |
| 871 OEBit o, RCBit r) { |
| 872 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r); |
| 873 } |
| 874 |
| 875 |
| 876 void Assembler::subfc(Register dst, Register src1, Register src2, |
| 877 OEBit o, RCBit r) { |
| 878 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r); |
| 879 } |
| 880 |
| 881 |
| 882 void Assembler::subfic(Register dst, Register src, const Operand& imm) { |
| 883 d_form(SUBFIC, dst, src, imm.imm_, true); |
| 884 } |
| 885 |
| 886 |
| 887 void Assembler::add(Register dst, Register src1, Register src2, |
| 888 OEBit o, RCBit r) { |
| 889 xo_form(EXT2 | ADDX, dst, src1, src2, o, r); |
| 890 } |
| 891 |
| 892 |
| 893 // Multiply low word |
| 894 void Assembler::mullw(Register dst, Register src1, Register src2, |
| 895 OEBit o, RCBit r) { |
| 896 xo_form(EXT2 | MULLW, dst, src1, src2, o, r); |
| 897 } |
| 898 |
| 899 |
| 900 // Multiply hi word |
| 901 void Assembler::mulhw(Register dst, Register src1, Register src2, |
| 902 OEBit o, RCBit r) { |
| 903 xo_form(EXT2 | MULHWX, dst, src1, src2, o, r); |
| 904 } |
| 905 |
| 906 |
| 907 // Divide word |
| 908 void Assembler::divw(Register dst, Register src1, Register src2, |
| 909 OEBit o, RCBit r) { |
| 910 xo_form(EXT2 | DIVW, dst, src1, src2, o, r); |
| 911 } |
| 912 |
| 913 |
| 914 void Assembler::addi(Register dst, Register src, const Operand& imm) { |
| 915 ASSERT(!src.is(r0)); // use li instead to show intent |
| 916 d_form(ADDI, dst, src, imm.imm_, true); |
| 917 } |
| 918 |
| 919 |
| 920 void Assembler::addis(Register dst, Register src, const Operand& imm) { |
| 921 ASSERT(!src.is(r0)); // use lis instead to show intent |
| 922 d_form(ADDIS, dst, src, imm.imm_, true); |
| 923 } |
| 924 |
| 925 |
| 926 void Assembler::addic(Register dst, Register src, const Operand& imm) { |
| 927 d_form(ADDIC, dst, src, imm.imm_, true); |
| 928 } |
| 929 |
| 930 |
| 931 void Assembler::andi(Register ra, Register rs, const Operand& imm) { |
| 932 d_form(ANDIx, rs, ra, imm.imm_, false); |
| 933 } |
| 934 |
| 935 |
| 936 void Assembler::andis(Register ra, Register rs, const Operand& imm) { |
| 937 d_form(ANDISx, rs, ra, imm.imm_, false); |
| 938 } |
| 939 |
| 940 |
| 941 void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) { |
| 942 x_form(EXT2 | NORX, dst, src1, src2, r); |
| 943 } |
| 944 |
| 945 |
| 946 void Assembler::notx(Register dst, Register src, RCBit r) { |
| 947 x_form(EXT2 | NORX, dst, src, src, r); |
| 948 } |
| 949 |
| 950 |
| 951 void Assembler::ori(Register ra, Register rs, const Operand& imm) { |
| 952 d_form(ORI, rs, ra, imm.imm_, false); |
| 953 } |
| 954 |
| 955 |
| 956 void Assembler::oris(Register dst, Register src, const Operand& imm) { |
| 957 d_form(ORIS, src, dst, imm.imm_, false); |
| 958 } |
| 959 |
| 960 |
| 961 void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) { |
| 962 x_form(EXT2 | ORX, dst, src1, src2, rc); |
| 963 } |
| 964 |
| 965 |
| 966 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { |
| 967 intptr_t imm16 = src2.imm_; |
| 968 #if V8_TARGET_ARCH_PPC64 |
| 969 int L = 1; |
| 970 #else |
| 971 int L = 0; |
| 972 #endif |
| 973 ASSERT(is_int16(imm16)); |
| 974 ASSERT(cr.code() >= 0 && cr.code() <= 7); |
| 975 imm16 &= kImm16Mask; |
| 976 emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16); |
| 977 } |
| 978 |
| 979 |
| 980 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { |
| 981 uintptr_t uimm16 = src2.imm_; |
| 982 #if V8_TARGET_ARCH_PPC64 |
| 983 int L = 1; |
| 984 #else |
| 985 int L = 0; |
| 986 #endif |
| 987 ASSERT(is_uint16(uimm16)); |
| 988 ASSERT(cr.code() >= 0 && cr.code() <= 7); |
| 989 uimm16 &= kImm16Mask; |
| 990 emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16); |
| 991 } |
| 992 |
| 993 |
| 994 void Assembler::cmp(Register src1, Register src2, CRegister cr) { |
| 995 #if V8_TARGET_ARCH_PPC64 |
| 996 int L = 1; |
| 997 #else |
| 998 int L = 0; |
| 999 #endif |
| 1000 ASSERT(cr.code() >= 0 && cr.code() <= 7); |
| 1001 emit(EXT2 | CMP | cr.code()*B23 | L*B21 | src1.code()*B16 | |
| 1002 src2.code()*B11); |
| 1003 } |
| 1004 |
| 1005 |
// Unsigned (logical) register-register compare, result to cr.  The L field
// selects a full 64-bit comparison on PPC64; cmplw forces 32-bit.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  ASSERT(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}
| 1016 |
| 1017 |
// Signed 32-bit compare immediate (L = 0 forces word compare on PPC64 too).
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  ASSERT(is_int16(imm16));
  ASSERT(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16);
}
| 1026 |
| 1027 |
// Unsigned 32-bit compare immediate (L = 0 forces word compare).
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  ASSERT(is_uint16(uimm16));
  ASSERT(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16);
}
| 1036 |
| 1037 |
// Signed 32-bit register-register compare (L = 0 forces word compare).
void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  ASSERT(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}
| 1044 |
| 1045 |
// Unsigned 32-bit register-register compare (L = 0 forces word compare).
void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  ASSERT(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 |
       src2.code()*B11);
}
| 1052 |
| 1053 |
| 1054 // Pseudo op - load immediate |
// li dst, imm == addi dst, 0, imm (RA = r0 means literal zero here).
void Assembler::li(Register dst, const Operand &imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}
| 1058 |
| 1059 |
// Pseudo op - load immediate shifted: dst = imm << 16 (addis with RA = 0).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}
| 1063 |
| 1064 |
| 1065 // Pseudo op - move register |
// Register-to-register copy via the standard "or dst, src, src" idiom.
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
| 1070 |
| 1071 |
| 1072 void Assembler::lbz(Register dst, const MemOperand &src) { |
| 1073 ASSERT(!src.ra_.is(r0)); |
| 1074 d_form(LBZ, dst, src.ra(), src.offset(), true); |
| 1075 } |
| 1076 |
| 1077 |
// Load byte and zero, indexed (X-form): rt = byte at ra + rb.
void Assembler::lbzx(Register rt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LBZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1084 |
| 1085 |
// Load byte and zero with update, indexed: also writes ra + rb back to ra.
void Assembler::lbzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1092 |
| 1093 |
| 1094 void Assembler::lhz(Register dst, const MemOperand &src) { |
| 1095 ASSERT(!src.ra_.is(r0)); |
| 1096 d_form(LHZ, dst, src.ra(), src.offset(), true); |
| 1097 } |
| 1098 |
| 1099 |
// Load halfword and zero, indexed (X-form).
void Assembler::lhzx(Register rt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1106 |
| 1107 |
// Load halfword and zero with update, indexed (writes ra + rb back to ra).
void Assembler::lhzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1114 |
| 1115 |
| 1116 void Assembler::lwz(Register dst, const MemOperand &src) { |
| 1117 ASSERT(!src.ra_.is(r0)); |
| 1118 d_form(LWZ, dst, src.ra(), src.offset(), true); |
| 1119 } |
| 1120 |
| 1121 |
// Load word and zero with update: base register receives effective address.
void Assembler::lwzu(Register dst, const MemOperand &src) {
  ASSERT(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}
| 1126 |
| 1127 |
// Load word and zero, indexed (X-form).
void Assembler::lwzx(Register rt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LWZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1134 |
| 1135 |
// Load word and zero with update, indexed (writes ra + rb back to ra).
void Assembler::lwzux(Register rt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1142 |
| 1143 |
| 1144 void Assembler::lwa(Register dst, const MemOperand &src) { |
| 1145 #if V8_TARGET_ARCH_PPC64 |
| 1146 int offset = src.offset(); |
| 1147 ASSERT(!src.ra_.is(r0)); |
| 1148 ASSERT(!(offset & 3) && is_int16(offset)); |
| 1149 offset = kImm16Mask & offset; |
| 1150 emit(LD | dst.code()*B21 | src.ra().code()*B16 | offset | 2); |
| 1151 #else |
| 1152 lwz(dst, src); |
| 1153 #endif |
| 1154 } |
| 1155 |
| 1156 |
| 1157 void Assembler::stb(Register dst, const MemOperand &src) { |
| 1158 ASSERT(!src.ra_.is(r0)); |
| 1159 d_form(STB, dst, src.ra(), src.offset(), true); |
| 1160 } |
| 1161 |
| 1162 |
// Store byte, indexed (X-form): byte of rs stored at ra + rb.
void Assembler::stbx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STBX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1169 |
| 1170 |
// Store byte with update, indexed (writes ra + rb back to ra).
void Assembler::stbux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1177 |
| 1178 |
| 1179 void Assembler::sth(Register dst, const MemOperand &src) { |
| 1180 ASSERT(!src.ra_.is(r0)); |
| 1181 d_form(STH, dst, src.ra(), src.offset(), true); |
| 1182 } |
| 1183 |
| 1184 |
// Store halfword, indexed (X-form).
void Assembler::sthx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STHX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1191 |
| 1192 |
// Store halfword with update, indexed (writes ra + rb back to ra).
void Assembler::sthux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1199 |
| 1200 |
| 1201 void Assembler::stw(Register dst, const MemOperand &src) { |
| 1202 ASSERT(!src.ra_.is(r0)); |
| 1203 d_form(STW, dst, src.ra(), src.offset(), true); |
| 1204 } |
| 1205 |
| 1206 |
// Store word with update: base register receives the effective address.
void Assembler::stwu(Register dst, const MemOperand &src) {
  ASSERT(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
| 1211 |
| 1212 |
// Store word, indexed (X-form).
void Assembler::stwx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STWX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1219 |
| 1220 |
// Store word with update, indexed (writes ra + rb back to ra).
void Assembler::stwux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1227 |
| 1228 |
// Sign-extend byte: ra = sign-extended low byte of rs.  Note the encoding
// places the source (rs) in the RS field (B21) and the target in RA (B16).
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code()*B21 | rs.code()*B16 | rc);
}
| 1232 |
| 1233 |
// Sign-extend halfword (same RS/RA field layout as extsb above).
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code()*B21 | rs.code()*B16 | rc);
}
| 1237 |
| 1238 |
// Two's-complement negate: rt = -ra, with optional OE (overflow) and Rc.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code()*B21 | ra.code()*B16 | o | r);
}
| 1242 |
| 1243 |
// AND with complement (X-form): dst = src1 & ~src2.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
| 1247 |
| 1248 |
| 1249 #if V8_TARGET_ARCH_PPC64 |
| 1250 // 64bit specific instructions |
// Load doubleword (DS-form): offset must be word aligned and fit 16 bits.
void Assembler::ld(Register rd, const MemOperand &src) {
  int offset = src.offset();
  ASSERT(!src.ra_.is(r0));
  ASSERT(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset);
}
| 1258 |
| 1259 |
// Load doubleword, indexed (X-form): rd = doubleword at ra + rb.
void Assembler::ldx(Register rd, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LDX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11);
}
| 1266 |
| 1267 |
// Load doubleword with update: DS-form ld opcode with XO bit | 1 selecting
// the update variant (base register receives the effective address).
void Assembler::ldu(Register rd, const MemOperand &src) {
  int offset = src.offset();
  ASSERT(!src.ra_.is(r0));
  ASSERT(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset | 1);
}
| 1275 |
| 1276 |
// Load doubleword with update, indexed (writes ra + rb back to ra).
void Assembler::ldux(Register rd, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11);
}
| 1283 |
| 1284 |
// Store doubleword (DS-form): offset must be word aligned and fit 16 bits.
void Assembler::std(Register rs, const MemOperand &src) {
  int offset = src.offset();
  ASSERT(!src.ra_.is(r0));
  ASSERT(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset);
}
| 1292 |
| 1293 |
// Store doubleword, indexed (X-form).
void Assembler::stdx(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STDX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11);
}
| 1300 |
| 1301 |
// Store doubleword with update: DS-form std opcode with XO bit | 1
// selecting the update variant (base register gets the effective address).
void Assembler::stdu(Register rs, const MemOperand &src) {
  int offset = src.offset();
  ASSERT(!src.ra_.is(r0));
  ASSERT(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset | 1);
}
| 1309 |
| 1310 |
// Store doubleword with update, indexed (writes ra + rb back to ra).
void Assembler::stdux(Register rs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11);
}
| 1317 |
| 1318 |
// Rotate left doubleword immediate then clear (MD-form).
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
| 1322 |
| 1323 |
// Rotate left doubleword immediate then clear left (MD-form).
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}
| 1327 |
| 1328 |
// Rotate left doubleword (by register) then clear left (MDS-form).
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}
| 1332 |
| 1333 |
// Rotate left doubleword immediate then clear right (MD-form).
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
| 1337 |
| 1338 |
// Pseudo op - shift left doubleword immediate:
// sldi ra, rs, n == rldicr ra, rs, n, 63-n.
void Assembler::sldi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  ASSERT((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63-val.imm_, rc);
}
| 1344 |
| 1345 |
// Pseudo op - shift right doubleword immediate (logical):
// srdi ra, rs, n == rldicl ra, rs, 64-n, n.
void Assembler::srdi(Register dst, Register src, const Operand& val,
                     RCBit rc) {
  ASSERT((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64-val.imm_, val.imm_, rc);
}
| 1351 |
| 1352 |
// Pseudo op - clear rightmost n bits: clrrdi == rldicr ra, rs, 0, 63-n.
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  ASSERT((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63-val.imm_, rc);
}
| 1358 |
| 1359 |
// Pseudo op - clear leftmost n bits: clrldi == rldicl ra, rs, 0, n.
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  ASSERT((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}
| 1365 |
| 1366 |
// Rotate left doubleword immediate then mask insert (MD-form).
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
| 1370 |
| 1371 |
// Shift right algebraic doubleword immediate (XS-form).  The 6-bit shift
// amount is split in the encoding: low 5 bits in the SH field, bit 5 in B1.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code()*B21 | ra.code()*B16 | sh0_4*B11 | sh5*B1 | r);
}
| 1378 |
| 1379 |
// Shift right doubleword (logical, shift amount in src2), X-form.
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}
| 1383 |
| 1384 |
// Shift left doubleword (shift amount in src2), X-form.
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}
| 1388 |
| 1389 |
// Shift right algebraic doubleword (shift amount in rb), X-form.
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}
| 1393 |
| 1394 |
// Pseudo op - rotate left doubleword by register: rldcl with mb = 0.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}
| 1398 |
| 1399 |
// Pseudo op - rotate left doubleword immediate: rldicl with mb = 0.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}
| 1403 |
| 1404 |
| 1405 void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) { |
| 1406 rldicl(ra, rs, 64 - sh, 0, r); |
| 1407 } |
| 1408 |
| 1409 |
// Count leading zeros doubleword.  The RB slot is unused by the hardware
// encoding; r0 is passed only to satisfy x_form's signature.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}
| 1413 |
| 1414 |
// Sign-extend word to doubleword (same RS/RA field layout as extsb/extsh).
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSW | ra.code()*B21 | rs.code()*B16 | rc);
}
| 1418 |
| 1419 |
// Multiply low doubleword (XO-form), with optional OE/Rc bits.
void Assembler::mulld(Register dst, Register src1, Register src2,
                      OEBit o, RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}
| 1424 |
| 1425 |
// Divide doubleword, signed (XO-form), with optional OE/Rc bits.
void Assembler::divd(Register dst, Register src1, Register src2,
                     OEBit o, RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
| 1430 #endif |
| 1431 |
| 1432 |
// Emits a simulator-only faked instruction word (FAKE_OPCODE namespace);
// fopcode must be below the fLastFaker sentinel.
void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
  ASSERT(fopcode < fLastFaker);
  emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
}
| 1437 |
| 1438 |
// Emits a simulator stub-trace marker, but only when tracing is enabled
// (no code is generated otherwise).
void Assembler::marker_asm(int mcode) {
  if (::v8::internal::FLAG_trace_sim_stubs) {
    ASSERT(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
    emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
  }
}
| 1445 |
| 1446 |
| 1447 // Function descriptor for AIX. |
| 1448 // Code address skips the function descriptor "header". |
| 1449 // TOC and static chain are ignored and set to 0. |
// Emits the 3-pointer descriptor at offset 0: [entry, TOC=0, env=0].
// The entry pointer targets the first instruction after the descriptor,
// hence pc_ + 3 * kPointerSize; relocated via INTERNAL_REFERENCE.
void Assembler::function_descriptor() {
  ASSERT(pc_offset() == 0);
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
  emit_ptr(0);
  emit_ptr(0);
}
| 1457 |
| 1458 |
| 1459 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
// Fixes up an internal reference at pc after code has moved: either shift
// it by delta, or recompute it from code_start.  Exactly one of delta /
// code_start must be meaningful (ASSERT below).  Handles two encodings:
// a function descriptor (identified by its two zero trailing words) and,
// under OOL constant pools, the mov that loads the constant pool pointer.
void Assembler::RelocateInternalReference(Address pc,
                                          intptr_t delta,
                                          Address code_start,
                                          ICacheFlushMode icache_flush_mode) {
  ASSERT(delta || code_start);
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t *fd = reinterpret_cast<uintptr_t*>(pc);
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    if (delta) {
      fd[0] += delta;
    } else {
      // Entry point sits just past the 3-word descriptor.
      fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
    }
    return;
  }
#endif
#if V8_OOL_CONSTANT_POOL
  // mov for LoadConstantPoolPointerRegister
  ConstantPoolArray *constant_pool = NULL;
  if (delta) {
    code_start = target_address_at(pc, constant_pool) + delta;
  }
  set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
#endif
}
| 1486 |
| 1487 |
// Disassembler helper: if pc points at a function descriptor, prints it
// into buffer and returns its size in bytes; otherwise returns 0 to let
// the caller decode pc as ordinary instructions.
int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t *fd = reinterpret_cast<uintptr_t*>(pc);
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    SNPrintF(buffer,
             "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR "]"
             "   function descriptor",
             fd[0], fd[1], fd[2]);
    return kPointerSize * 3;
  }
#endif
  return 0;
}
| 1502 #endif |
| 1503 |
| 1504 |
// Worst-case instruction count for a mov of operand x, used for sizing
// fixed sequences.  Mirrors the dispatch logic in Assembler::mov below;
// canOptimize is only needed for the constant-pool check and the ASSERT,
// hence the preprocessor guard around its definition.
int Assembler::instructions_required_for_mov(const Operand& x) const {
#if V8_OOL_CONSTANT_POOL || DEBUG
  bool canOptimize = !(x.must_output_reloc_info(this) ||
                       is_trampoline_pool_blocked());
#endif
#if V8_OOL_CONSTANT_POOL
  if (use_constant_pool_for_mov(x, canOptimize)) {
    if (use_extended_constant_pool()) {
      return kMovInstructionsExtendedConstantPool;
    }
    return kMovInstructionsConstantPool;
  }
#endif
  // Callers rely on the full (non-optimized) sequence length here.
  ASSERT(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
| 1521 |
| 1522 |
| 1523 #if V8_OOL_CONSTANT_POOL |
// Decides whether mov(dst, x) should load x from the out-of-line constant
// pool rather than synthesize it with an immediate sequence.  canOptimize
// means a shorter-than-worst-case immediate sequence is permitted.
bool Assembler::use_constant_pool_for_mov(const Operand& x,
                                          bool canOptimize) const {
  if (!is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = x.immediate();
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }

  if (use_extended_constant_pool()) {
    // Prefer a two instruction mov immediate sequence over the constant
    // pool's extended section.
#if V8_TARGET_ARCH_PPC64
    // TODO(mbrandy): enable extended constant pool usage for 64-bit.
    // See ARM commit e27ab337 for a reference.
    // if (canOptimize && is_int32(value)) {
    return false;
    // }
#else
    return false;
#endif
  }

  return true;
}
| 1554 #endif |
| 1555 |
| 1556 |
| 1557 bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
| 1558 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { |
| 1559 if (assembler != NULL && assembler->predictable_code_size()) return true; |
| 1560 return assembler->serializer_enabled(); |
| 1561 } else if (RelocInfo::IsNone(rmode_)) { |
| 1562 return false; |
| 1563 } |
| 1564 return true; |
| 1565 } |
| 1566 |
| 1567 |
| 1568 // Primarily used for loading constants |
| 1569 // This should really move to be in macro-assembler as it |
| 1570 // is really a pseudo instruction |
| 1571 // Some usages of this intend for a FIXED_SEQUENCE to be used |
| 1572 // Todo - break this dependency so we can optimize mov() in general |
| 1573 // and only use the generic version when we require a fixed sequence |
| 1574 void Assembler::mov(Register dst, const Operand& src) { |
| 1575 intptr_t value = src.immediate(); |
| 1576 bool canOptimize; |
| 1577 RelocInfo rinfo(pc_, src.rmode_, value, NULL); |
| 1578 |
| 1579 if (src.must_output_reloc_info(this)) { |
| 1580 RecordRelocInfo(rinfo); |
| 1581 } |
| 1582 |
| 1583 canOptimize = !(src.must_output_reloc_info(this) || |
| 1584 is_trampoline_pool_blocked()); |
| 1585 |
| 1586 #if V8_OOL_CONSTANT_POOL |
| 1587 if (use_constant_pool_for_mov(src, canOptimize)) { |
| 1588 ASSERT(is_constant_pool_available()); |
| 1589 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); |
| 1590 if (section == ConstantPoolArray::EXTENDED_SECTION) { |
| 1591 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1592 #if V8_TARGET_ARCH_PPC64 |
| 1593 // We are forced to use 3 instruction sequence since the constant |
| 1594 // pool pointer is tagged. |
| 1595 lis(dst, Operand::Zero()); |
| 1596 ori(dst, dst, Operand::Zero()); |
| 1597 ldx(dst, MemOperand(kConstantPoolRegister, dst)); |
| 1598 #else |
| 1599 addis(dst, kConstantPoolRegister, Operand::Zero()); |
| 1600 lwz(dst, MemOperand(dst, 0)); |
| 1601 #endif |
| 1602 } else { |
| 1603 ASSERT(section == ConstantPoolArray::SMALL_SECTION); |
| 1604 #if V8_TARGET_ARCH_PPC64 |
| 1605 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1606 // We are forced to use 2 instruction sequence since the constant |
| 1607 // pool pointer is tagged. |
| 1608 li(dst, Operand::Zero()); |
| 1609 ldx(dst, MemOperand(kConstantPoolRegister, dst)); |
| 1610 #else |
| 1611 lwz(dst, MemOperand(kConstantPoolRegister, 0)); |
| 1612 #endif |
| 1613 } |
| 1614 return; |
| 1615 } |
| 1616 #endif |
| 1617 |
| 1618 if (canOptimize) { |
| 1619 if (is_int16(value)) { |
| 1620 li(dst, Operand(value)); |
| 1621 } else { |
| 1622 uint16_t u16; |
| 1623 #if V8_TARGET_ARCH_PPC64 |
| 1624 if (is_int32(value)) { |
| 1625 #endif |
| 1626 lis(dst, Operand(value >> 16)); |
| 1627 #if V8_TARGET_ARCH_PPC64 |
| 1628 } else { |
| 1629 if (is_int48(value)) { |
| 1630 li(dst, Operand(value >> 32)); |
| 1631 } else { |
| 1632 lis(dst, Operand(value >> 48)); |
| 1633 u16 = ((value >> 32) & 0xffff); |
| 1634 if (u16) { |
| 1635 ori(dst, dst, Operand(u16)); |
| 1636 } |
| 1637 } |
| 1638 sldi(dst, dst, Operand(32)); |
| 1639 u16 = ((value >> 16) & 0xffff); |
| 1640 if (u16) { |
| 1641 oris(dst, dst, Operand(u16)); |
| 1642 } |
| 1643 } |
| 1644 #endif |
| 1645 u16 = (value & 0xffff); |
| 1646 if (u16) { |
| 1647 ori(dst, dst, Operand(u16)); |
| 1648 } |
| 1649 } |
| 1650 return; |
| 1651 } |
| 1652 |
| 1653 ASSERT(!canOptimize); |
| 1654 |
| 1655 { |
| 1656 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1657 #if V8_TARGET_ARCH_PPC64 |
| 1658 int32_t hi_32 = static_cast<int32_t>(value >> 32); |
| 1659 int32_t lo_32 = static_cast<int32_t>(value); |
| 1660 int hi_word = static_cast<int>(hi_32 >> 16); |
| 1661 int lo_word = static_cast<int>(hi_32 & 0xffff); |
| 1662 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1663 ori(dst, dst, Operand(lo_word)); |
| 1664 sldi(dst, dst, Operand(32)); |
| 1665 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff)); |
| 1666 lo_word = static_cast<int>(lo_32 & 0xffff); |
| 1667 oris(dst, dst, Operand(hi_word)); |
| 1668 ori(dst, dst, Operand(lo_word)); |
| 1669 #else |
| 1670 int hi_word = static_cast<int>(value >> 16); |
| 1671 int lo_word = static_cast<int>(value & 0xffff); |
| 1672 lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); |
| 1673 ori(dst, dst, Operand(lo_word)); |
| 1674 #endif |
| 1675 } |
| 1676 } |
| 1677 |
| 1678 |
// Loads into dst the offset of label from the start of the code object
// (adjusted past the Code header).  For an unbound label, emits a
// placeholder (link word + nop) that target_at_put later patches into a
// two-instruction mov.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    int target = label->pos();
    mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
  } else {
    bool is_linked = label->is_linked();
    // Emit the link to the label in the code stream followed by extra
    // nop instructions.
    ASSERT(dst.is(r3));  // target_at_put assumes r3 for now
    // Link word is pc-relative to the previous entry in the label's chain
    // (0 terminates the chain).
    int link = is_linked ? label->pos() - pc_offset(): 0;
    label->link_to(pc_offset());

    if (!is_linked && !trampoline_emitted_) {
      unbound_labels_count_++;
      next_buffer_check_ -= kTrampolineSlotsSize;
    }

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // When the label gets bound: target_at extracts the link and
    // target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(link);
    nop();
  }
}
| 1708 |
| 1709 |
| 1710 // Special register instructions |
// Condition register XOR: CR bit bt = CR bit ba ^ CR bit bb.
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt*B21 | ba*B16 | bb*B11);
}
| 1714 |
| 1715 |
// Condition register equivalence: CR bit bt = ~(CR bit ba ^ CR bit bb).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt*B21 | ba*B16 | bb*B11);
}
| 1719 |
| 1720 |
// Move from link register: 256 << 11 is the split-field SPR encoding of LR.
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code()*B21 | 256 << 11);   // Ignore RC bit
}
| 1724 |
| 1725 |
// Move to link register (same split-field SPR encoding of LR as mflr).
void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 256 << 11);   // Ignore RC bit
}
| 1729 |
| 1730 |
// Move to count register: 288 << 11 is the split-field SPR encoding of CTR.
void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 288 << 11);   // Ignore RC bit
}
| 1734 |
| 1735 |
// Move to XER: 32 << 11 is the split-field SPR encoding of XER.
void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code()*B21 | 32 << 11);
}
| 1739 |
| 1740 |
// Move FPSCR field bfa into condition register field bf.
void Assembler::mcrfs(int bf, int bfa) {
  emit(EXT4 | MCRFS | bf*B23 | bfa*B18);
}
| 1744 |
| 1745 |
// Move the whole condition register into dst.
void Assembler::mfcr(Register dst) {
  emit(EXT2 | MFCR | dst.code()*B21);
}
| 1749 |
| 1750 |
| 1751 // Exception-generating instructions and debugging support. |
| 1752 // Stops with a non-negative code less than kNumOfWatchedStops support |
| 1753 // enabling/disabling and a counter feature. See simulator-ppc.h . |
// Emits a breakpoint, optionally guarded by cond/cr.  msg and code are
// currently unused here (kept for interface parity with other ports);
// bkpt ignores its argument as well.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    // Branch around the breakpoint when the condition does not hold.
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}
| 1765 |
| 1766 |
// Emits a fixed trap word (imm16 is ignored).  0x7d821008 appears to be a
// tw-family trap encoding -- TODO confirm against the ISA tables.
void Assembler::bkpt(uint32_t imm16) {
  emit(0x7d821008);
}
| 1770 |
| 1771 |
// Simulator-only tracing hook: when stub tracing is enabled, emits a
// marker word followed by the raw msg pointer (split into two words on
// 64-bit).  Emits nothing in normal builds; cond/code/cr are unused here.
void Assembler::info(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (::v8::internal::FLAG_trace_sim_stubs) {
    emit(0x7d9ff808);
#if V8_TARGET_ARCH_PPC64
    uint64_t value = reinterpret_cast<uint64_t>(msg);
    emit(static_cast<uint32_t>(value >> 32));
    emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
#else
    emit(reinterpret_cast<Instr>(msg));
#endif
  }
}
| 1785 |
| 1786 |
// Data cache block flush for the line at ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
    emit(EXT2 | DCBF | ra.code()*B16 | rb.code()*B11);
}
| 1790 |
| 1791 |
// Full memory barrier (sync).
void Assembler::sync() {
    emit(EXT2 | SYNC);
}
| 1795 |
| 1796 |
// Instruction cache block invalidate for the line at ra + rb.
void Assembler::icbi(Register ra, Register rb) {
    emit(EXT2 | ICBI | ra.code()*B16 | rb.code()*B11);
}
| 1800 |
| 1801 |
// Instruction synchronize (discards prefetched instructions).
void Assembler::isync() {
    emit(EXT1 | ISYNC);
}
| 1805 |
| 1806 |
| 1807 // Floating point support |
| 1808 |
// Load floating-point double (D-form) from ra + 16-bit offset.
void Assembler::lfd(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code()*B21 | ra.code()*B16 | imm16);
}
| 1817 |
| 1818 |
// Load floating-point double with update (base gets effective address).
void Assembler::lfdu(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code()*B21 | ra.code()*B16 | imm16);
}
| 1827 |
| 1828 |
// Load floating-point double, indexed (frt = double at ra + rb).
void Assembler::lfdx(const DoubleRegister frt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LFDX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1835 |
| 1836 |
// Load floating-point double with update, indexed.
void Assembler::lfdux(const DoubleRegister frt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1843 |
| 1844 |
// Load floating-point single (D-form) from ra + 16-bit offset.
void Assembler::lfs(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code()*B21 | ra.code()*B16 | imm16);
}
| 1854 |
| 1855 |
// Load floating-point single with update (base gets effective address).
void Assembler::lfsu(const DoubleRegister frt, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code()*B21 | ra.code()*B16 | imm16);
}
| 1865 |
| 1866 |
// Load floating-point single, indexed.
void Assembler::lfsx(const DoubleRegister frt, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1873 |
| 1874 |
// Load floating-point single with update, indexed.
void Assembler::lfsux(const DoubleRegister frt, const MemOperand & src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1881 |
| 1882 |
// Store floating-point double (D-form) at ra + 16-bit offset.
void Assembler::stfd(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code()*B21 | ra.code()*B16 | imm16);
}
| 1892 |
| 1893 |
// Store floating-point double with update (base gets effective address).
void Assembler::stfdu(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code()*B21 | ra.code()*B16 | imm16);
}
| 1903 |
| 1904 |
// Store floating-point double, indexed.
void Assembler::stfdx(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STFDX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC);
}
| 1911 |
| 1912 |
// Store floating-point double with update, indexed.
void Assembler::stfdux(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}
| 1919 |
| 1920 |
// Store floating-point single (D-form) at ra + 16-bit offset.
void Assembler::stfs(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code()*B21 | ra.code()*B16 | imm16);
}
| 1930 |
| 1931 |
// Store floating-point single with update (base gets effective address).
void Assembler::stfsu(const DoubleRegister frs, const MemOperand &src) {
  int offset = src.offset();
  Register ra = src.ra();
  ASSERT(is_int16(offset));
  ASSERT(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code()*B21 | ra.code()*B16 | imm16);
}
| 1941 |
| 1942 |
// Store floating-point single, indexed.
void Assembler::stfsx(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}
| 1949 |
| 1950 |
// Store floating-point single with update, indexed.
void Assembler::stfsux(const DoubleRegister frs, const MemOperand &src) {
  Register ra = src.ra();
  Register rb = src.rb();
  ASSERT(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC);
}
| 1957 |
| 1958 |
// Floating subtract (A-form): frt <- fra - frb, optionally setting CR1.
void Assembler::fsub(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}
| 1965 |
| 1966 |
// Floating add (A-form): frt <- fra + frb, optionally setting CR1.
void Assembler::fadd(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}
| 1973 |
| 1974 |
| 1975 void Assembler::fmul(const DoubleRegister frt, |
| 1976 const DoubleRegister fra, |
| 1977 const DoubleRegister frc, |
| 1978 RCBit rc) { |
| 1979 emit(EXT4 | FMUL | frt.code()*B21 | fra.code()*B16 | frc.code()*B6 | rc); |
| 1980 } |
| 1981 |
| 1982 |
// Floating divide (A-form): frt <- fra / frb, optionally setting CR1.
void Assembler::fdiv(const DoubleRegister frt,
                     const DoubleRegister fra,
                     const DoubleRegister frb,
                     RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
| 1989 |
| 1990 |
| 1991 void Assembler::fcmpu(const DoubleRegister fra, |
| 1992 const DoubleRegister frb, |
| 1993 CRegister cr) { |
| 1994 ASSERT(cr.code() >= 0 && cr.code() <= 7); |
| 1995 emit(EXT4 | FCMPU | cr.code()*B23 | fra.code()*B16 | frb.code()*B11); |
| 1996 } |
| 1997 |
| 1998 |
| 1999 void Assembler::fmr(const DoubleRegister frt, |
| 2000 const DoubleRegister frb, |
| 2001 RCBit rc) { |
| 2002 emit(EXT4 | FMR | frt.code()*B21 | frb.code()*B11 | rc); |
| 2003 } |
| 2004 |
| 2005 |
| 2006 void Assembler::fctiwz(const DoubleRegister frt, |
| 2007 const DoubleRegister frb) { |
| 2008 emit(EXT4 | FCTIWZ | frt.code()*B21 | frb.code()*B11); |
| 2009 } |
| 2010 |
| 2011 |
| 2012 void Assembler::fctiw(const DoubleRegister frt, |
| 2013 const DoubleRegister frb) { |
| 2014 emit(EXT4 | FCTIW | frt.code()*B21 | frb.code()*B11); |
| 2015 } |
| 2016 |
| 2017 |
| 2018 void Assembler::frim(const DoubleRegister frt, |
| 2019 const DoubleRegister frb) { |
| 2020 emit(EXT4 | FRIM | frt.code()*B21 | frb.code()*B11); |
| 2021 } |
| 2022 |
| 2023 |
| 2024 void Assembler::frsp(const DoubleRegister frt, |
| 2025 const DoubleRegister frb, |
| 2026 RCBit rc) { |
| 2027 emit(EXT4 | FRSP | frt.code()*B21 | frb.code()*B11 | rc); |
| 2028 } |
| 2029 |
| 2030 |
| 2031 void Assembler::fcfid(const DoubleRegister frt, |
| 2032 const DoubleRegister frb, |
| 2033 RCBit rc) { |
| 2034 emit(EXT4 | FCFID | frt.code()*B21 | frb.code()*B11 | rc); |
| 2035 } |
| 2036 |
| 2037 |
| 2038 void Assembler::fctid(const DoubleRegister frt, |
| 2039 const DoubleRegister frb, |
| 2040 RCBit rc) { |
| 2041 emit(EXT4 | FCTID | frt.code()*B21 | frb.code()*B11 | rc); |
| 2042 } |
| 2043 |
| 2044 |
| 2045 void Assembler::fctidz(const DoubleRegister frt, |
| 2046 const DoubleRegister frb, |
| 2047 RCBit rc) { |
| 2048 emit(EXT4 | FCTIDZ | frt.code()*B21 | frb.code()*B11 | rc); |
| 2049 } |
| 2050 |
| 2051 |
| 2052 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 2053 const DoubleRegister frc, const DoubleRegister frb, |
| 2054 RCBit rc) { |
| 2055 emit(EXT4 | FSEL | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2056 frc.code()*B6 | rc); |
| 2057 } |
| 2058 |
| 2059 |
| 2060 void Assembler::fneg(const DoubleRegister frt, |
| 2061 const DoubleRegister frb, |
| 2062 RCBit rc) { |
| 2063 emit(EXT4 | FNEG | frt.code()*B21 | frb.code()*B11 | rc); |
| 2064 } |
| 2065 |
| 2066 |
| 2067 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { |
| 2068 emit(EXT4 | MTFSFI | bf*B23 | immediate*B12 | rc); |
| 2069 } |
| 2070 |
| 2071 |
| 2072 void Assembler::mffs(const DoubleRegister frt, RCBit rc) { |
| 2073 emit(EXT4 | MFFS | frt.code()*B21 | rc); |
| 2074 } |
| 2075 |
| 2076 |
| 2077 void Assembler::mtfsf(const DoubleRegister frb, bool L, |
| 2078 int FLM, bool W, RCBit rc) { |
| 2079 emit(EXT4 | MTFSF | frb.code()*B11 | W*B16 | FLM*B17 | L*B25 | rc); |
| 2080 } |
| 2081 |
| 2082 |
| 2083 void Assembler::fsqrt(const DoubleRegister frt, |
| 2084 const DoubleRegister frb, |
| 2085 RCBit rc) { |
| 2086 emit(EXT4 | FSQRT | frt.code()*B21 | frb.code()*B11 | rc); |
| 2087 } |
| 2088 |
| 2089 |
| 2090 void Assembler::fabs(const DoubleRegister frt, |
| 2091 const DoubleRegister frb, |
| 2092 RCBit rc) { |
| 2093 emit(EXT4 | FABS | frt.code()*B21 | frb.code()*B11 | rc); |
| 2094 } |
| 2095 |
| 2096 |
| 2097 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 2098 const DoubleRegister frc, const DoubleRegister frb, |
| 2099 RCBit rc) { |
| 2100 emit(EXT4 | FMADD | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2101 frc.code()*B6 | rc); |
| 2102 } |
| 2103 |
| 2104 |
| 2105 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 2106 const DoubleRegister frc, const DoubleRegister frb, |
| 2107 RCBit rc) { |
| 2108 emit(EXT4 | FMSUB | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | |
| 2109 frc.code()*B6 | rc); |
| 2110 } |
| 2111 |
| 2112 |
| 2113 // Pseudo instructions. |
| 2114 void Assembler::nop(int type) { |
| 2115 switch (type) { |
| 2116 case 0: |
| 2117 ori(r0, r0, Operand::Zero()); |
| 2118 break; |
| 2119 case DEBUG_BREAK_NOP: |
| 2120 ori(r3, r3, Operand::Zero()); |
| 2121 break; |
| 2122 default: |
| 2123 UNIMPLEMENTED(); |
| 2124 } |
| 2125 } |
| 2126 |
| 2127 |
| 2128 bool Assembler::IsNop(Instr instr, int type) { |
| 2129 ASSERT((0 == type) || (DEBUG_BREAK_NOP == type)); |
| 2130 int reg = 0; |
| 2131 if (DEBUG_BREAK_NOP == type) { |
| 2132 reg = 3; |
| 2133 } |
| 2134 return instr == (ORI | reg*B21 | reg*B16); |
| 2135 } |
| 2136 |
| 2137 |
| 2138 // Debugging. |
// Record a JS_RETURN reloc entry at the current pc (debugger patch point).
void Assembler::RecordJSReturn() {
  // Flush buffered source positions first so they precede the return mark.
  positions_recorder()->WriteRecordedPositions();
  // Grow the buffer now; reloc recording assumes space is available.
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
| 2144 |
| 2145 |
// Record a DEBUG_BREAK_SLOT reloc entry at the current pc.
void Assembler::RecordDebugBreakSlot() {
  // Flush buffered source positions first so they precede the slot mark.
  positions_recorder()->WriteRecordedPositions();
  // Grow the buffer now; reloc recording assumes space is available.
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
| 2151 |
| 2152 |
// Attach a comment string to the current pc (only with --code-comments).
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    // The comment is stored by pointer; msg must outlive code generation.
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
| 2159 |
| 2160 |
// Replace the code buffer with a larger one, preserving the emitted
// instructions (at the bottom) and the relocation info (growing down from
// the top), then re-point all internal pointers into the new buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: 4KB minimum, doubling up to 1MB, +1MB after.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. pc_delta shifts instructions (anchored at the buffer
  // start); rc_delta shifts reloc info (anchored at the buffer end).
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
    }
  }
#if V8_OOL_CONSTANT_POOL
  // Constant pool entries also hold absolute pcs that must be shifted.
  constant_pool_builder_.Relocate(pc_delta);
#endif
#endif
}
| 2214 |
| 2215 |
// Emit a raw data byte into the instruction stream at the current pc.
void Assembler::db(uint8_t data) {
  CheckBuffer();  // make sure there is room before writing
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
| 2221 |
| 2222 |
// Emit a raw 32-bit data word into the instruction stream at the current pc.
void Assembler::dd(uint32_t data) {
  CheckBuffer();  // make sure there is room before writing
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
| 2228 |
| 2229 |
// Emit a raw pointer-sized word (4 or 8 bytes depending on target) at pc.
void Assembler::emit_ptr(uintptr_t data) {
  CheckBuffer();  // make sure there is room before writing
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}
| 2235 |
| 2236 |
| 2237 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2238 RelocInfo rinfo(pc_, rmode, data, NULL); |
| 2239 RecordRelocInfo(rinfo); |
| 2240 } |
| 2241 |
| 2242 |
// Write a relocation entry for the most recently emitted instruction.
// CODE_TARGET_WITH_ID entries are rewritten to carry the recorded AST id
// as their data payload before being written out.
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
  if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
      rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rinfo.rmode())
           || RelocInfo::IsJSReturn(rinfo.rmode())
           || RelocInfo::IsComment(rinfo.rmode())
           || RelocInfo::IsPosition(rinfo.rmode()));
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
      if (!serializer_enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
      // Replace the data payload with the pending AST id, then consume it.
      RelocInfo reloc_info_with_ast_id(rinfo.pc(),
                                       rinfo.rmode(),
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
| 2272 |
| 2273 |
| 2274 void Assembler::BlockTrampolinePoolFor(int instructions) { |
| 2275 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
| 2276 } |
| 2277 |
| 2278 |
// Emit the trampoline pool (a run of unconditional branches that serve as
// long-range hops for conditional branches to unbound labels), unless
// emission is currently blocked. Emits at most once per Assembler.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump, then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Straight-line code must jump over the pool slots.
      b(&after_pool);

      // One branch slot per currently-unbound label.
      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        b(&after_pool);
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
  }
  return;
}
| 2325 |
| 2326 |
// Allocate the constant pool array for the code object being generated.
// With out-of-line constant pools disabled this is always the canonical
// empty array.
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
#if V8_OOL_CONSTANT_POOL
  return constant_pool_builder_.New(isolate);
#else
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
#endif
}
| 2336 |
| 2337 |
// Fill a freshly allocated constant pool with the recorded entries.
// A no-op when out-of-line constant pools are disabled.
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
#if V8_OOL_CONSTANT_POOL
  constant_pool_builder_.Populate(this, constant_pool);
#else
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
#endif
}
| 2346 |
| 2347 |
| 2348 #if V8_OOL_CONSTANT_POOL |
// Start with no entries, filling the small (16-bit reachable) section first.
ConstantPoolBuilder::ConstantPoolBuilder()
    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
| 2351 |
| 2352 |
| 2353 bool ConstantPoolBuilder::IsEmpty() { |
| 2354 return entries_.size() == 0; |
| 2355 } |
| 2356 |
| 2357 |
// Map a relocation mode to the constant pool entry type it needs.
// NOTE: the braces below deliberately span the #if/#else so that each
// build configuration sees one complete if/else-if chain — on PPC64 all
// non-GC data becomes INT64; on 32-bit, NONE64 data is INT64 and other
// non-GC data is INT32.
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
    RelocInfo::Mode rmode) {
#if V8_TARGET_ARCH_PPC64
  // We don't support 32-bit entries at this time.
  if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT64;
#else
  if (rmode == RelocInfo::NONE64) {
    return ConstantPoolArray::INT64;
  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT32;
#endif
  } else if (RelocInfo::IsCodeTarget(rmode)) {
    return ConstantPoolArray::CODE_PTR;
  } else {
    ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
    return ConstantPoolArray::HEAP_PTR;
  }
}
| 2377 |
| 2378 |
// Record a constant pool entry for |rinfo|, merging with an existing
// identical entry when the entry will never be patched. Returns the layout
// section (small or extended) the entry was assigned to.
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
    Assembler* assm, const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  // These modes never produce pool entries.
  ASSERT(rmode != RelocInfo::COMMENT &&
         rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  ConstantPoolArray::LayoutSection entry_section = current_section_;
  if (RelocInfo::IsNone(rmode) ||
      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
    // Linear scan for an existing identical entry.
    size_t i;
    std::vector<ConstantPoolEntry>::const_iterator it;
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
        // Merge with found entry.
        merged_index = i;
        entry_section = entries_[i].section_;
        break;
      }
    }
  }

  // A merged entry lands in the section of its original.
  ASSERT(entry_section <= current_section_);
  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
  }

  // Check if we still have room for another entry in the small section
  // given PPC's load immediate offset range.
  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
      !is_int16(ConstantPoolArray::SizeFor(*small_entries()))) {
    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
  }
  return entry_section;
}
| 2420 |
| 2421 |
| 2422 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) { |
| 2423 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
| 2424 entry != entries_.end(); entry++) { |
| 2425 ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); |
| 2426 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); |
| 2427 } |
| 2428 } |
| 2429 |
| 2430 |
| 2431 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { |
| 2432 if (IsEmpty()) { |
| 2433 return isolate->factory()->empty_constant_pool_array(); |
| 2434 } else if (extended_entries()->is_empty()) { |
| 2435 return isolate->factory()->NewConstantPoolArray(*small_entries()); |
| 2436 } else { |
| 2437 ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION); |
| 2438 return isolate->factory()->NewExtendedConstantPoolArray( |
| 2439 *small_entries(), *extended_entries()); |
| 2440 } |
| 2441 } |
| 2442 |
| 2443 |
// Copy every recorded entry's value into the allocated |constant_pool| and
// patch each recording load instruction with its entry's final offset.
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  // The allocated pool must match the recorded entry counts exactly.
  ASSERT_EQ(extended_entries()->is_empty(),
            !constant_pool->is_extended_layout());
  ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
      constant_pool, ConstantPoolArray::SMALL_SECTION)));
  if (constant_pool->is_extended_layout()) {
    ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
  }

  // Set up initial offsets: for each (section, type) pair, the byte offset
  // of the first element of that type within the pool.
  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
             [ConstantPoolArray::NUMBER_OF_TYPES];
  for (int section = 0; section <= constant_pool->final_section(); section++) {
    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
                            ? small_entries()->total_count()
                            : 0;
    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
      if (number_of_entries_[section].count_of(type) != 0) {
        offsets[section][type] = constant_pool->OffsetOfElementAt(
            number_of_entries_[section].base_of(type) + section_start);
      }
    }
  }

  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    RelocInfo rinfo = entry->rinfo_;
    RelocInfo::Mode rmode = entry->rinfo_.rmode();
    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

    // Update constant pool if necessary and get the entry's offset.
    // NOTE: the #if/#else below splices different else-if arms into one
    // chain; on PPC64 there is no INT32 arm.
    int offset;
    if (entry->merged_index_ == -1) {
      offset = offsets[entry->section_][type];
      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
      if (type == ConstantPoolArray::INT64) {
#if V8_TARGET_ARCH_PPC64
        constant_pool->set_at_offset(offset, rinfo.data());
#else
        constant_pool->set_at_offset(offset, rinfo.data64());
      } else if (type == ConstantPoolArray::INT32) {
        constant_pool->set_at_offset(offset,
                                     static_cast<int32_t>(rinfo.data()));
#endif
      } else if (type == ConstantPoolArray::CODE_PTR) {
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Address>(rinfo.data()));
      } else {
        ASSERT(type == ConstantPoolArray::HEAP_PTR);
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Object*>(rinfo.data()));
      }
      // NOTE(review): the kHeapObjectTag adjustment presumably compensates
      // for a tagged pool base pointer in the patched load — verify against
      // SetConstantPoolOffset.
      offset -= kHeapObjectTag;
      entry->merged_index_ = offset;  // Stash offset for merged entries.
    } else {
      // Merged entry: reuse the offset stashed by its original above.
      ASSERT(entry->merged_index_ < (entry - entries_.begin()));
      offset = entries_[entry->merged_index_].merged_index_;
    }

    // Patch load instruction with correct offset.
    Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
  }
}
| 2510 #endif |
| 2511 |
| 2512 |
| 2513 } } // namespace v8::internal |
| 2514 |
| 2515 #endif // V8_TARGET_ARCH_PPC |
OLD | NEW |