// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#define A64_DEFINE_REG_STATICS

#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures utilities (for V8 compatibility).

ExternalReference ExternalReference::cpu_features() {
  return ExternalReference(&CpuFeatures::supported_);
}


// -----------------------------------------------------------------------------
// CPURegList utilities.

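// A CPURegList represents its contents as a bit mask held in list_: bit i is
// set iff the register with code i is in the list. For example, a list
// holding {x0, x2, x5} has list_ == 0b100101. PopLowestIndex() peels off the
// register with the lowest code using CountTrailingZeros, and
// PopHighestIndex() takes the highest using CountLeadingZeros.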
CPURegister CPURegList::PopLowestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  ASSERT((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  ASSERT((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}

void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    ASSERT(type() == CPURegister::kNoRegister);
    ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the
// stack. MacroAssembler::SafepointRegisterStackIndex handles mapping from
// register code to index in the safepoint register slots. Any change here
// can affect this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on A64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * kInstructionSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  UNIMPLEMENTED();
}


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  ASSERT(number_of_valid_regs >= number_of_unique_regs);
  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}

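// If any register appears twice (or two arguments alias each other), the
// valid count exceeds the unique count and AreAliased() reports true. For
// example, AreAliased(x0, x1, x0) is true, while AreAliased(x0, x1, d0) is
// false because X and FP registers are tracked in separate masks. Arguments
// that are not valid registers are ignored.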

bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


void Operand::initialize_handle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    immediate_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    immediate_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif
    return Serializer::enabled();
  }

  return !RelocInfo::IsNone(rmode_);
}


// Assembler

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(const_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  ASSERT(const_pool_blocked_nesting_ == 0);
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  num_pending_reloc_info_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_reloc_info_ == 0);

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
    desc->origin = this;
  }
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

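// For example, Align(8) pads with single nop instructions until pc_offset()
// is a multiple of 8; since A64 instructions are 4 bytes each, at most one
// nop is needed when the buffer is instruction-aligned.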

void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      Instruction * link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  ASSERT(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  ASSERT(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the
    // chain.
    prev_link->SetImmPCOffsetTarget(prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;
      }
    } else {
      // The CHECK below will fail.
      // Some other work could be attempted to fix up the chain, but it would
      // be rather complicated. If we crash here, we may want to consider
      // using another mechanism than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always
      // call this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following
      // state:
      //    [previous code]
      //    B(label);
      //    [20KB code]
      //    Tbz(label);   // First tbz. Pointing to unconditional branch.
      //    [20KB code]
      //    Tbz(label);   // Second tbz. Pointing to the first tbz.
      //    [more code]
      // and this function is called to remove the first tbz from the label
      // link chain. Since tbz has a range of +-32KB, the second tbz cannot
      // point to the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  ASSERT(!label->is_near_linked());
  ASSERT(!label->is_bound());

  // If the label is linked, the link chain looks something like this:
  //
  //      |--I----I-------I-------L
  //      |---------------------->| pc_offset
  //      |-------------->|         linkoffset = label->pos()
  //              |<------|         link->ImmPCOffset()
  //              |------>|         prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    ASSERT(linkoffset >= 0);
    ASSERT(linkoffset < pc_offset());
    ASSERT((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    ASSERT(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  ASSERT(label->is_bound());
  ASSERT(!label->is_linked());

  DeleteUnresolvedBranchInfoForLabel(label);
}


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  ASSERT(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    ASSERT(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      ASSERT(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  // Branches to this label will be resolved when the label is bound below.
  std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    it_tmp = it++;
    if (it_tmp->second.label_ == label) {
      CHECK(it_tmp->first >= pc_offset());
      unresolved_branches_.erase(it_tmp);
    }
  }
}

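// The 'it_tmp = it++' step above advances the iterator before a possible
// erase: std::multimap::erase invalidates only the erased iterator, so 'it'
// remains valid for the next loop iteration.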

void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks happening by setting the next check to
    // the biggest possible offset.
    next_buffer_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
    // Two cases:
    //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
    //    trigger a check.
    next_buffer_check_ = no_const_pool_before_;
  }
}

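// A sketch of the intended usage pattern, assumed from the nesting counter
// above (not taken from the original file):
//
//   assembler->StartBlockConstPool();
//   ...  // Emit a sequence that must not be split by a constant pool.
//   assembler->EndBlockConstPool();
//
// Only the outermost StartBlockConstPool()/EndBlockConstPool() pair toggles
// next_buffer_check_, so nested blocks are safe.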

bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::ConstantPoolMarker(uint32_t size) {
  ASSERT(is_const_pool_blocked());
  // + 1 is for the crash guard.
  Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
}


void Assembler::ConstantPoolGuard() {
#ifdef DEBUG
  // Currently this is only used after a constant pool marker.
  ASSERT(is_const_pool_blocked());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  ASSERT(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif

  // We must generate only one instruction.
  Emit(BLR | Rn(xzr));
}

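// Taken together, a pool is headed by the two-instruction sequence
//
//   ldr xzr, #<size>   // Marker: a literal load into the zero register.
//   blr xzr            // Guard: crashes if execution falls into the pool.
//
// Neither instruction is ever produced by normal code generation, which is
// what makes the marker recognizable in IsConstantPoolAt().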

void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  ASSERT(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  ASSERT(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}

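// The common shift and extend forms are aliases of these bitfield
// instructions in the A64 ISA; for example (architectural fact, not code
// from this file):
//
//   lsl  x0, x1, #4   is  ubfm x0, x1, #60, #59   (immr = -shift mod 64)
//   sxtw x0, w1       is  sbfm x0, x1, #0, #31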

void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, InvertCondition(cond));
}


void Assembler::csetm(const Register &rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, InvertCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rn, InvertCondition(cond));
}

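// Summary of the conditional-select aliases defined above:
//
//   cset  rd, cond      ->  csinc rd, zr, zr, invert(cond)
//   csetm rd, cond      ->  csinv rd, zr, zr, invert(cond)
//   cinc  rd, rn, cond  ->  csinc rd, rn, rn, invert(cond)
//   cinv  rd, rn, cond  ->  csinv rd, rn, rn, invert(cond)
//   cneg  rd, rn, cond  ->  csneg rd, rn, rn, invert(cond)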

void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(rd.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  ASSERT(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  ASSERT(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    ASSERT(!rt.Is(addr.base()));
    ASSERT(!rt2.Is(addr.base()));
    ASSERT(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      ASSERT(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  ASSERT(!rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));
  ASSERT(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
      static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}


void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}


void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}

void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}


void Assembler::str(const CPURegister& rt, const MemOperand& dst) {
  LoadStore(rt, dst, StoreOpFor(rt));
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  ASSERT(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}


void Assembler::ldr(const Register& rt, uint64_t imm) {
  // TODO(all): The constant pool may be garbage collected, so we cannot store
  // arbitrary values in it. Manually move the value for now. Fix
  // MacroAssembler::Fmov when this is implemented.
  UNIMPLEMENTED();
}


void Assembler::ldr(const FPRegister& ft, double imm) {
  // TODO(all): The constant pool may be garbage collected, so we cannot store
  // arbitrary values in it. Manually move the value for now. Fix
  // MacroAssembler::Fmov when this is implemented.
  UNIMPLEMENTED();
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}

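// This split is forced by the encoding: register number 31 means the zero
// register in logical instructions such as orr, but the stack pointer in
// add-immediate, so an orr form could not reference sp at all.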

void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  ASSERT(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  ASSERT(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(FPRegister fd, double imm) {
  if (fd.Is64Bits() && IsImmFP64(imm)) {
    Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
  } else if (fd.Is32Bits() && IsImmFP32(imm)) {
    Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    Register zr = AppropriateZeroRegFor(fd);
    fmov(fd, zr);
  } else {
    ldr(fd, imm);
  }
}

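// IsImmFP64/IsImmFP32 accept only the values representable in the 8-bit FMOV
// immediate field: +/- n/16 * 2^e with 16 <= n <= 31 and -3 <= e <= 4
// (architectural fact, not defined in this file). For example, 1.0, 0.5 and
// -2.0 are encodable while 0.1 is not; +0.0 is materialized from the zero
// register, and anything else falls back to the (unimplemented) literal load
// above.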
| 1456 | |
| 1457 void Assembler::fmov(Register rd, FPRegister fn) { | |
| 1458 ASSERT(rd.SizeInBits() == fn.SizeInBits()); | |
| 1459 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; | |
| 1460 Emit(op | Rd(rd) | Rn(fn)); | |
| 1461 } | |
| 1462 | |
| 1463 | |
| 1464 void Assembler::fmov(FPRegister fd, Register rn) { | |
| 1465 ASSERT(fd.SizeInBits() == rn.SizeInBits()); | |
| 1466 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx; | |
| 1467 Emit(op | Rd(fd) | Rn(rn)); | |
| 1468 } | |
| 1469 | |
| 1470 | |
| 1471 void Assembler::fmov(FPRegister fd, FPRegister fn) { | |
| 1472 ASSERT(fd.SizeInBits() == fn.SizeInBits()); | |
| 1473 Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn)); | |
| 1474 } | |
| 1475 | |
| 1476 | |
| 1477 void Assembler::fadd(const FPRegister& fd, | |
| 1478 const FPRegister& fn, | |
| 1479 const FPRegister& fm) { | |
| 1480 FPDataProcessing2Source(fd, fn, fm, FADD); | |
| 1481 } | |
| 1482 | |
| 1483 | |
| 1484 void Assembler::fsub(const FPRegister& fd, | |
| 1485 const FPRegister& fn, | |
| 1486 const FPRegister& fm) { | |
| 1487 FPDataProcessing2Source(fd, fn, fm, FSUB); | |
| 1488 } | |
| 1489 | |
| 1490 | |
| 1491 void Assembler::fmul(const FPRegister& fd, | |
| 1492 const FPRegister& fn, | |
| 1493 const FPRegister& fm) { | |
| 1494 FPDataProcessing2Source(fd, fn, fm, FMUL); | |
| 1495 } | |
| 1496 | |
| 1497 | |
void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}


void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}


void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}


void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}


void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}


void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  ASSERT(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}


void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  USE(value);
  // Although the fcmp instruction strictly accepts only an immediate value of
  // +0.0, we do not need to check for -0.0, because the sign of 0.0 does not
  // affect the result of the comparison.
  ASSERT(value == 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}


void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  ASSERT(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}


void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}


void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}


void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
    ASSERT(fn.Is32Bits());
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    // Convert double to float.
    ASSERT(fn.Is64Bits());
    FPDataProcessing1Source(fd, fn, FCVT_sd);
  }
}


void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAU);
}


void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAS);
}


void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMU);
}


void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMS);
}


void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNU);
}


void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNS);
}


void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZU);
}


void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZS);
}


void Assembler::scvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


void Assembler::ucvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
// If b is 1, then B is 0.
Instr Assembler::ImmFP32(float imm) {
  ASSERT(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
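
// For example (a worked sketch, not part of the original source): 1.0f has
// raw bits 0x3f800000, so a = 0, b = 1 and cdefgh = 110000, giving
// imm8 = 0b01110000 = 0x70, the standard A64 floating-point immediate
// encoding of +1.0.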


Instr Assembler::ImmFP64(double imm) {
  ASSERT(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
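
// Similarly (a worked sketch): 2.0 has raw bits 0x4000000000000000, so a = 0,
// B = 1 (hence b = 0) and cdefgh = 000000, giving imm8 = 0, which is indeed
// how A64 encodes +2.0.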


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  if (shift >= 0) {
    // Explicit shift specified.
    ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & ~0xffffUL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xffffUL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xffffUL << 32)) == 0) {
      ASSERT(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xffffUL << 48)) == 0) {
      ASSERT(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  ASSERT(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}
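
// For example (illustrative): passing imm == 0x12340000 with shift == -1
// leaves only halfword 1 populated, so the code above reduces it to
// imm == 0x1234 with shift == 1 and emits 'movz x0, #0x1234, lsl #16'
// (assuming mov_op selects MOVZ and rd is x0).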


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    ASSERT(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    ASSERT(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      ASSERT(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    ASSERT(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  ASSERT(!operand.NeedsRelocation());
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  ASSERT(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  ASSERT(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  // Don't generate simulator-specific code if we are building a snapshot,
  // which might be run on real hardware.
  if (!Serializer::enabled()) {
#ifdef DEBUG
    Serializer::TooLateToEnableNow();
#endif
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit a literal pool.
    BlockConstPoolScope scope(this);

    Label start;
    bind(&start);

    // Refer to instructions-a64.h for a description of the marker and its
    // arguments.
    hlt(kImmExceptionIsDebug);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
    dc32(code);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
    dc32(params);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
    EmitStringData(message);
    hlt(kImmExceptionIsUnreachable);

    return;
  }
  // Fall through if Serializer is enabled.
#endif

  if (params & BREAK) {
    hlt(kImmExceptionIsDebug);
  }
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.SizeInBits();

    ASSERT(immediate != 0);
    ASSERT(immediate != -1);
    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    ASSERT(operand.IsShiftedRegister());
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}
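
// For example (illustrative): bic(x0, x1, Operand(0xffff)) reaches this code
// with the NOT bit set, so it is rewritten as an AND of x1 with the inverted
// immediate 0xffffffffffff0000, which is an encodable logical immediate (a
// contiguous run of 48 set bits).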


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    ASSERT(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  ASSERT(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        ASSERT(rn.SizeInBits() == kXRegSize);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}
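
// For example (a worked sketch): extending with UXTB and left_shift == 2 into
// a 32-bit register gives high_bit == 7 and non_shift_bits == 30, so the code
// above emits 'ubfm wd, wn, #30, #7' (i.e. ubfiz wd, wn, #2, #8), which
// extracts wn<7:0> and shifts it left by two bits.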


void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  ASSERT(operand.IsShiftedRegister());
  ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  ASSERT(!operand.NeedsRelocation());
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  ASSERT(!operand.NeedsRelocation());
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}


bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
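
// For example (illustrative): 0x123 is directly encodable, 0x123000 is
// encodable as 0x123 with the optional 12-bit left shift, but 0x123456 is not
// an add/sub immediate and must be synthesized by the macro assembler.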

void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  ptrdiff_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    ASSERT((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    ASSERT(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        ASSERT(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}
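
// For example (illustrative): for 'ldr x0, [x1, #8]' the offset is a multiple
// of the access size and fits the 12-bit scaled field (8 >> 3 == 1), so the
// unsigned-offset form is used; 'ldr x0, [x1, #-8]' cannot be scaled, but -8
// fits the 9-bit signed immediate of the unscaled (ldur) form.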


bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}


void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
  ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
  // constant pool. It should not be emitted.
  ASSERT(!rt.Is(xzr));
  Emit(LDR_x_lit |
       ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
       Rt(rt));
}


void Assembler::LoadRelocatedValue(const CPURegister& rt,
                                   const Operand& operand,
                                   LoadLiteralOp op) {
  int64_t imm = operand.immediate();
  ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
  RecordRelocInfo(operand.rmode(), imm);
  BlockConstPoolFor(1);
  Emit(op | ImmLLiteral(0) | Rt(rt));
}


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  ASSERT((width == kWRegSize) || (width == kXRegSize));

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //   (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32- or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //
  // TODO(mcapewel) This code does not consider using X/W register overlap to
  // support 64-bit immediates where the top 32 bits are zero, and the bottom
  // 32 bits are an encodable logical immediate.
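  //
  // As a worked example (illustrative, not part of the original source):
  // 0x5555555555555555 repeats the two-bit pattern 01 across the register,
  // so the loop below halves the width down to 2 and encodes the value with
  // N = 0, imm_s = 111100 and imm_r = 0.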

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if ((value == 0) || (value == 0xffffffffffffffffUL) ||
      ((width == kWRegSize) && (value == 0xffffffff))) {
    return false;
  }

  unsigned lead_zero = CountLeadingZeros(value, width);
  unsigned lead_one = CountLeadingZeros(~value, width);
  unsigned trail_zero = CountTrailingZeros(value, width);
  unsigned trail_one = CountTrailingZeros(~value, width);
  unsigned set_bits = CountSetBits(value, width);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
  int imm_s_mask = 0x3F;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      *n = 0;
      *imm_s = 0x3C;
      *imm_r = (value & 3) - 1;
      return true;
    }

    *n = (width == 64) ? 1 : 0;
    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      *imm_r = 0;
    } else {
      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
    //    the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      return true;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      return true;
    }

    // 5. If the most-significant half of the bitwise value is equal to the
    //    least-significant half, return to step 2 using the least-significant
    //    half of the value.
    uint64_t mask = (1UL << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return false;
  }
}


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}
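
// For example (illustrative): ccmp(x0, Operand(31), ...) is encodable, since
// 31 fits the 5-bit immediate field, while a comparison against 32 has to be
// performed through a register.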


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  //   aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  //   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //   0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xffffffffffffL) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }

  return true;
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc-relative pointing outside the code
  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL)) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
    if (num_pending_reloc_info_ == 0) {
      first_const_pool_use_ = pc_offset();
    }
    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(
          reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    // If there are some pending entries, the constant pool cannot be blocked
    // further than first_const_pool_use_ + kMaxDistToPool.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and by BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (num_pending_reloc_info_ == 0) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kAvgDistToPool or more.
  //  * no jump is required and the distance to the first instruction accessing
  //    the constant pool is at least kMaxDistToPool / 2.
  ASSERT(first_const_pool_use_ >= 0);
  int dist = pc_offset() - first_const_pool_use_;
  if (!force_emit && dist < kAvgDistToPool &&
      (require_jump || (dist < (kMaxDistToPool / 2)))) {
    return;
  }

  Label size_check;
  bind(&size_check);

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the jump over the pool, the constant pool marker, the
  // constant pool guard, and the gap to the relocation information).
  int jump_instr = require_jump ? kInstructionSize : 0;
  int size_pool_marker = kInstructionSize;
  int size_pool_guard = kInstructionSize;
  int pool_size = jump_instr + size_pool_marker + size_pool_guard +
                  num_pending_reloc_info_ * kPointerSize;
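  // For example (illustrative): with four pending 64-bit entries and a
  // required jump, pool_size is 4 + 4 + 4 + 4 * 8 == 44 bytes.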
  int needed_space = pool_size + kGap;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(pool_size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Emit a constant pool header. The header has two goals:
    //  1) Encode the size of the constant pool, for use by the disassembler.
    //  2) Terminate the program, to try to prevent execution from accidentally
    //     flowing into the constant pool.
    // The header is therefore made of two A64 instructions:
    //   ldr xzr, #<size of the constant pool in 32-bit words>
    //   blr xzr
    // If executed, the code will likely segfault and lr will point to the
    // beginning of the constant pool.
    // TODO(all): currently each relocated constant is 64 bits, consider adding
    // support for 32-bit entries.
    ConstantPoolMarker(2 * num_pending_reloc_info_);
    ConstantPoolGuard();

    // Emit constant pool entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL);

      Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      ASSERT(instr->IsLdrLiteral() &&
             instr->ImmLLiteral() == 0);

      instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
      dc64(rinfo.data());
    }

    num_pending_reloc_info_ = 0;
    first_const_pool_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;

  ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(pool_size));
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::buffer_space() const {
  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}


void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64