// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#define A64_DEFINE_REG_STATICS

#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures utilities (for V8 compatibility).

ExternalReference ExternalReference::cpu_features() {
  return ExternalReference(&CpuFeatures::supported_);
}


// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  ASSERT((1UL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  ASSERT((1UL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    ASSERT(type() == CPURegister::kNoRegister);
    ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on A64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * kInstructionSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  UNIMPLEMENTED();
}


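// Returns true if any two of the valid registers passed in alias each other,
// i.e. if the same register or FP register appears more than once in the
// argument list. Invalid (NoCPUReg) arguments are ignored.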
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
    CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
    CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  ASSERT(number_of_valid_regs >= number_of_unique_regs);
  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}


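// Returns true if every valid register among the arguments has the same size
// and type as reg1. reg1 must be valid; invalid arguments are ignored.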
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


Operand::Operand(const ExternalReference& f)
    : immediate_(reinterpret_cast<intptr_t>(f.address())),
      reg_(NoReg),
      rmode_(RelocInfo::EXTERNAL_REFERENCE) {}


Operand::Operand(Handle<Object> handle) : reg_(NoReg) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    immediate_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    immediate_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
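    // In debug builds, record that the serializer state has been queried, so
    // that enabling it later (which would invalidate this decision) asserts.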
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif
    return Serializer::enabled();
  }

  return !RelocInfo::IsNone(rmode_);
}


// Assembler

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(const_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  ASSERT(const_pool_blocked_nesting_ == 0);
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  num_pending_reloc_info_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_reloc_info_ == 0);

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
    desc->origin = this;
  }
}


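// Insert nop instructions until pc_offset() is a multiple of m. m must be a
// power of two, no smaller than the instruction size (4 bytes).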
void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


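// In debug builds, verify that the label's link chain is well formed by
// walking it back to its start, where a link's offset refers to itself.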
void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    int linkoffset = label->pos();
    bool start_of_chain = false;
    while (!start_of_chain) {
      Instruction* link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      start_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  ASSERT(!label->is_near_linked());
  ASSERT(!label->is_bound());

  // If the label is linked, the link chain looks something like this:
  //
  // |--I----I-------I-------L
  // |---------------------->| pc_offset
  // |-------------->|         linkoffset = label->pos()
  //         |<------|         link->ImmPCOffset()
  // |------>|                 prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    ASSERT(linkoffset >= 0);
    ASSERT(linkoffset < pc_offset());
    ASSERT((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    ASSERT(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  ASSERT(label->is_bound());
  ASSERT(!label->is_linked());
}


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  ASSERT(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    ASSERT(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      ASSERT(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks from happening by setting the next check
    // to the biggest possible offset.
    next_buffer_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
    // Two cases:
    //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
    //    trigger a check.
    next_buffer_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::ConstantPoolMarker(uint32_t size) {
  ASSERT(is_const_pool_blocked());
  // + 1 is for the crash guard.
  Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
}


void Assembler::ConstantPoolGuard() {
#ifdef DEBUG
  // Currently this is only used after a constant pool marker.
  ASSERT(is_const_pool_blocked());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  ASSERT(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif

  // Crash by branching to 0. lr now points near the fault.
  // TODO(all): update the simulator to trap this pattern.
  Emit(BLR | Rn(xzr));
}


void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  ASSERT(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  ASSERT(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
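// bfm, sbfm and ubfm are the generic bitfield-move forms. Architecturally,
// operations such as bfi, sbfx, ubfx, the sign/zero extensions and the
// immediate shifts are aliases of these instructions.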
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


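// The conditional-set and conditional-increment/invert/negate aliases below
// are encoded as csinc/csinv/csneg with the condition inverted, which is why
// the 'al' and 'nv' conditions are not allowed.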
void Assembler::cset(const Register& rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, InvertCondition(cond));
}


void Assembler::csetm(const Register& rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, InvertCondition(cond));
}


void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rn, InvertCondition(cond));
}


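// Shared emitter for the conditional-select group (csel, csinc, csinv and
// csneg).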
void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


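// Shared emitter for the three-source data-processing group (madd, msub and
// the widening multiply variants).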
void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(rd.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  ASSERT(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  ASSERT(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    ASSERT(!rt.Is(addr.base()));
    ASSERT(!rt2.Is(addr.base()));
    ASSERT(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      ASSERT(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  ASSERT(!rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));
  ASSERT(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
    static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}


void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}


void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}


void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}


void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  ASSERT(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}


void Assembler::ldr(const Register& rt, uint64_t imm) {
  // TODO(all): Constant pools may be garbage collected. Hence we cannot store
  // TODO(all): arbitrary values in them. Manually move it for now.
  // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
  UNIMPLEMENTED();
}


void Assembler::ldr(const FPRegister& ft, double imm) {
  // TODO(all): Constant pools may be garbage collected. Hence we cannot store
  // TODO(all): arbitrary values in them. Manually move it for now.
  // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
  UNIMPLEMENTED();
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  ASSERT(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  ASSERT(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(FPRegister fd, double imm) {
  if (fd.Is64Bits() && IsImmFP64(imm)) {
    Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
  } else if (fd.Is32Bits() && IsImmFP32(imm)) {
    Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
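    // A move from the zero register can only produce +0.0, so -0.0 is
    // deliberately excluded by the copysign check above.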
    Register zr = AppropriateZeroRegFor(fd);
    fmov(fd, zr);
  } else {
    ldr(fd, imm);
  }
}


void Assembler::fmov(Register rd, FPRegister fn) {
  ASSERT(rd.SizeInBits() == fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}


void Assembler::fmov(FPRegister fd, Register rn) {
  ASSERT(fd.SizeInBits() == rn.SizeInBits());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}


void Assembler::fmov(FPRegister fd, FPRegister fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}


void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}


void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}


void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}


void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
 |  1472   FPDataProcessing1Source(fd, fn, FNEG); | 
 |  1473 } | 
 |  1474  | 
 |  1475  | 
 |  1476 void Assembler::fsqrt(const FPRegister& fd, | 
 |  1477                       const FPRegister& fn) { | 
 |  1478   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1479   FPDataProcessing1Source(fd, fn, FSQRT); | 
 |  1480 } | 
 |  1481  | 
 |  1482  | 
 |  1483 void Assembler::frinta(const FPRegister& fd, | 
 |  1484                        const FPRegister& fn) { | 
 |  1485   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1486   FPDataProcessing1Source(fd, fn, FRINTA); | 
 |  1487 } | 
 |  1488  | 
 |  1489  | 
 |  1490 void Assembler::frintn(const FPRegister& fd, | 
 |  1491                        const FPRegister& fn) { | 
 |  1492   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1493   FPDataProcessing1Source(fd, fn, FRINTN); | 
 |  1494 } | 
 |  1495  | 
 |  1496  | 
 |  1497 void Assembler::frintz(const FPRegister& fd, | 
 |  1498                        const FPRegister& fn) { | 
 |  1499   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1500   FPDataProcessing1Source(fd, fn, FRINTZ); | 
 |  1501 } | 
 |  1502  | 
 |  1503  | 
 |  1504 void Assembler::fcmp(const FPRegister& fn, | 
 |  1505                      const FPRegister& fm) { | 
 |  1506   ASSERT(fn.SizeInBits() == fm.SizeInBits()); | 
 |  1507   Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); | 
 |  1508 } | 
 |  1509  | 
 |  1510  | 
 |  1511 void Assembler::fcmp(const FPRegister& fn, | 
 |  1512                      double value) { | 
 |  1513   USE(value); | 
 |  1514   // Although the fcmp instruction strictly accepts only an immediate value of | 
 |  1515   // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't | 
 |  1516   // affect the result of the comparison. | 
 |  1517   ASSERT(value == 0.0); | 
 |  1518   Emit(FPType(fn) | FCMP_zero | Rn(fn)); | 
 |  1519 } | 
 |  1520  | 
 |  1521  | 
 |  1522 void Assembler::fccmp(const FPRegister& fn, | 
 |  1523                       const FPRegister& fm, | 
 |  1524                       StatusFlags nzcv, | 
 |  1525                       Condition cond) { | 
 |  1526   ASSERT(fn.SizeInBits() == fm.SizeInBits()); | 
 |  1527   Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); | 
 |  1528 } | 
 |  1529  | 
 |  1530  | 
 |  1531 void Assembler::fcsel(const FPRegister& fd, | 
 |  1532                       const FPRegister& fn, | 
 |  1533                       const FPRegister& fm, | 
 |  1534                       Condition cond) { | 
 |  1535   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1536   ASSERT(fd.SizeInBits() == fm.SizeInBits()); | 
 |  1537   Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); | 
 |  1538 } | 
 |  1539  | 
 |  1540  | 
 |  1541 void Assembler::FPConvertToInt(const Register& rd, | 
 |  1542                                const FPRegister& fn, | 
 |  1543                                FPIntegerConvertOp op) { | 
 |  1544   Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd)); | 
 |  1545 } | 
 |  1546  | 
 |  1547  | 
 |  1548 void Assembler::fcvt(const FPRegister& fd, | 
 |  1549                      const FPRegister& fn) { | 
 |  1550   if (fd.Is64Bits()) { | 
 |  1551     // Convert float to double. | 
 |  1552     ASSERT(fn.Is32Bits()); | 
 |  1553     FPDataProcessing1Source(fd, fn, FCVT_ds); | 
 |  1554   } else { | 
 |  1555     // Convert double to float. | 
 |  1556     ASSERT(fn.Is64Bits()); | 
 |  1557     FPDataProcessing1Source(fd, fn, FCVT_sd); | 
 |  1558   } | 
 |  1559 } | 
 |  1560  | 
 |  1561  | 
 |  1562 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) { | 
 |  1563   FPConvertToInt(rd, fn, FCVTAU); | 
 |  1564 } | 
 |  1565  | 
 |  1566  | 
 |  1567 void Assembler::fcvtas(const Register& rd, const FPRegister& fn) { | 
 |  1568   FPConvertToInt(rd, fn, FCVTAS); | 
 |  1569 } | 
 |  1570  | 
 |  1571  | 
 |  1572 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) { | 
 |  1573   FPConvertToInt(rd, fn, FCVTMU); | 
 |  1574 } | 
 |  1575  | 
 |  1576  | 
 |  1577 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) { | 
 |  1578   FPConvertToInt(rd, fn, FCVTMS); | 
 |  1579 } | 
 |  1580  | 
 |  1581  | 
 |  1582 void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) { | 
 |  1583   FPConvertToInt(rd, fn, FCVTNU); | 
 |  1584 } | 
 |  1585  | 
 |  1586  | 
 |  1587 void Assembler::fcvtns(const Register& rd, const FPRegister& fn) { | 
 |  1588   FPConvertToInt(rd, fn, FCVTNS); | 
 |  1589 } | 
 |  1590  | 
 |  1591  | 
 |  1592 void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) { | 
 |  1593   FPConvertToInt(rd, fn, FCVTZU); | 
 |  1594 } | 
 |  1595  | 
 |  1596  | 
 |  1597 void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) { | 
 |  1598   FPConvertToInt(rd, fn, FCVTZS); | 
 |  1599 } | 
 |  1600  | 
 |  1601  | 
 |  1602 void Assembler::scvtf(const FPRegister& fd, | 
 |  1603                       const Register& rn, | 
 |  1604                       unsigned fbits) { | 
 |  1605   if (fbits == 0) { | 
 |  1606     Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd)); | 
 |  1607   } else { | 
 |  1608     Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | | 
 |  1609          Rd(fd)); | 
 |  1610   } | 
 |  1611 } | 
 |  1612  | 
 |  1613  | 
 |  1614 void Assembler::ucvtf(const FPRegister& fd, | 
 |  1615                       const Register& rn, | 
 |  1616                       unsigned fbits) { | 
 |  1617   if (fbits == 0) { | 
 |  1618     Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd)); | 
 |  1619   } else { | 
 |  1620     Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | | 
 |  1621          Rd(fd)); | 
 |  1622   } | 
 |  1623 } | 
 |  1624  | 
 |  1625  | 
 |  1626 // Note: | 
 |  1627 // Below, a difference in case for the same letter indicates a | 
 |  1628 // negated bit. | 
 |  1629 // If b is 1, then B is 0. | 
 |  1630 Instr Assembler::ImmFP32(float imm) { | 
 |  1631   ASSERT(IsImmFP32(imm)); | 
 |  1632   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 | 
 |  1633   uint32_t bits = float_to_rawbits(imm); | 
 |  1634   // bit7: a000.0000 | 
 |  1635   uint32_t bit7 = ((bits >> 31) & 0x1) << 7; | 
 |  1636   // bit6: 0b00.0000 | 
 |  1637   uint32_t bit6 = ((bits >> 29) & 0x1) << 6; | 
 |  1638   // bit5_to_0: 00cd.efgh | 
 |  1639   uint32_t bit5_to_0 = (bits >> 19) & 0x3f; | 
 |  1640  | 
 |  1641   return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; | 
 |  1642 } | 
 |  1643  | 
 |  1644  | 
 |  1645 Instr Assembler::ImmFP64(double imm) { | 
 |  1646   ASSERT(IsImmFP64(imm)); | 
 |  1647   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 | 
 |  1648   //       0000.0000.0000.0000.0000.0000.0000.0000 | 
 |  1649   uint64_t bits = double_to_rawbits(imm); | 
 |  1650   // bit7: a000.0000 | 
 |  1651   uint32_t bit7 = ((bits >> 63) & 0x1) << 7; | 
 |  1652   // bit6: 0b00.0000 | 
 |  1653   uint32_t bit6 = ((bits >> 61) & 0x1) << 6; | 
 |  1654   // bit5_to_0: 00cd.efgh | 
 |  1655   uint32_t bit5_to_0 = (bits >> 48) & 0x3f; | 
 |  1656  | 
 |  1657   return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; | 
 |  1658 } | 
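 |       // Worked example (illustrative): for imm = 1.0f, float_to_rawbits gives | 
 |       // 0x3f800000, so bit7 = 0, bit6 = 0x40 and bit5_to_0 = 0x30; the encoded | 
 |       // imm8 is therefore 0x70, the standard FMOV encoding of 1.0. | 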
 |  1659  | 
 |  1660  | 
 |  1661 // Code generation helpers. | 
 |  1662 void Assembler::MoveWide(const Register& rd, | 
 |  1663                          uint64_t imm, | 
 |  1664                          int shift, | 
 |  1665                          MoveWideImmediateOp mov_op) { | 
 |  1666   if (shift >= 0) { | 
 |  1667     // Explicit shift specified. | 
 |  1668     ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); | 
 |  1669     ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); | 
 |  1670     shift /= 16; | 
 |  1671   } else { | 
 |  1672     // Calculate a new immediate and shift combination to encode the immediate | 
 |  1673     // argument. | 
 |  1674     shift = 0; | 
 |  1675     if ((imm & ~0xffffUL) == 0) { | 
 |  1676       // Nothing to do. | 
 |  1677     } else if ((imm & ~(0xffffUL << 16)) == 0) { | 
 |  1678       imm >>= 16; | 
 |  1679       shift = 1; | 
 |  1680     } else if ((imm & ~(0xffffUL << 32)) == 0) { | 
 |  1681       ASSERT(rd.Is64Bits()); | 
 |  1682       imm >>= 32; | 
 |  1683       shift = 2; | 
 |  1684     } else if ((imm & ~(0xffffUL << 48)) == 0) { | 
 |  1685       ASSERT(rd.Is64Bits()); | 
 |  1686       imm >>= 48; | 
 |  1687       shift = 3; | 
 |  1688     } | 
 |  1689   } | 
 |  1690  | 
 |  1691   ASSERT(is_uint16(imm)); | 
 |  1692  | 
 |  1693   Emit(SF(rd) | MoveWideImmediateFixed | mov_op | | 
 |  1694        Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift)); | 
 |  1695 } | 
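 |       // For illustration (not from the original source): with shift == -1, an | 
 |       // immediate of 0xabcd0000 is re-encoded as imm = 0xabcd with shift = 1, | 
 |       // so a single "movz x0, #0xabcd, lsl #16" style instruction suffices. | 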
 |  1696  | 
 |  1697  | 
 |  1698 void Assembler::AddSub(const Register& rd, | 
 |  1699                        const Register& rn, | 
 |  1700                        const Operand& operand, | 
 |  1701                        FlagsUpdate S, | 
 |  1702                        AddSubOp op) { | 
 |  1703   ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 
 |  1704   ASSERT(!operand.NeedsRelocation()); | 
 |  1705   if (operand.IsImmediate()) { | 
 |  1706     int64_t immediate = operand.immediate(); | 
 |  1707     ASSERT(IsImmAddSub(immediate)); | 
 |  1708     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); | 
 |  1709     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | | 
 |  1710          ImmAddSub(immediate) | dest_reg | RnSP(rn)); | 
 |  1711   } else if (operand.IsShiftedRegister()) { | 
 |  1712     ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 
 |  1713     ASSERT(operand.shift() != ROR); | 
 |  1714  | 
 |  1715     // For instructions of the form: | 
 |  1716     //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ] | 
 |  1717     //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ] | 
 |  1718     //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ] | 
 |  1719     //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ] | 
 |  1720     // or their 64-bit register equivalents, convert the operand from shifted to | 
 |  1721     // extended register mode, and emit an add/sub extended instruction. | 
 |  1722     if (rn.IsSP() || rd.IsSP()) { | 
 |  1723       ASSERT(!(rd.IsSP() && (S == SetFlags))); | 
 |  1724       DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, | 
 |  1725                                AddSubExtendedFixed | op); | 
 |  1726     } else { | 
 |  1727       DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); | 
 |  1728     } | 
 |  1729   } else { | 
 |  1730     ASSERT(operand.IsExtendedRegister()); | 
 |  1731     DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); | 
 |  1732   } | 
 |  1733 } | 
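 |       // Example of the stack-pointer special case above (illustrative): | 
 |       // "add csp, x0, Operand(x1, LSL, 2)" cannot use the shifted-register | 
 |       // form, so the operand is converted and the instruction is emitted in | 
 |       // the extended-register form, "add csp, x0, x1, UXTX #2". | 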
 |  1734  | 
 |  1735  | 
 |  1736 void Assembler::AddSubWithCarry(const Register& rd, | 
 |  1737                                 const Register& rn, | 
 |  1738                                 const Operand& operand, | 
 |  1739                                 FlagsUpdate S, | 
 |  1740                                 AddSubWithCarryOp op) { | 
 |  1741   ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 
 |  1742   ASSERT(rd.SizeInBits() == operand.reg().SizeInBits()); | 
 |  1743   ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); | 
 |  1744   ASSERT(!operand.NeedsRelocation()); | 
 |  1745   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); | 
 |  1746 } | 
 |  1747  | 
 |  1748  | 
 |  1749 void Assembler::hlt(int code) { | 
 |  1750   ASSERT(is_uint16(code)); | 
 |  1751   Emit(HLT | ImmException(code)); | 
 |  1752 } | 
 |  1753  | 
 |  1754  | 
 |  1755 void Assembler::brk(int code) { | 
 |  1756   ASSERT(is_uint16(code)); | 
 |  1757   Emit(BRK | ImmException(code)); | 
 |  1758 } | 
 |  1759  | 
 |  1760  | 
 |  1761 void Assembler::debug(const char* message, uint32_t code, Instr params) { | 
 |  1762 #ifdef USE_SIMULATOR | 
 |  1763   // Don't generate simulator-specific code if we are building a snapshot, which | 
 |  1764   // might be run on real hardware. | 
 |  1765   if (!Serializer::enabled()) { | 
 |  1766 #ifdef DEBUG | 
 |  1767     Serializer::TooLateToEnableNow(); | 
 |  1768 #endif | 
 |  1769     // The arguments to the debug marker need to be contiguous in memory, so | 
 |  1770     // make sure we don't try to emit a literal pool. | 
 |  1771     BlockConstPoolScope scope(this); | 
 |  1772  | 
 |  1773     Label start; | 
 |  1774     bind(&start); | 
 |  1775  | 
 |  1776     // Refer to instructions-a64.h for a description of the marker and its | 
 |  1777     // arguments. | 
 |  1778     hlt(kImmExceptionIsDebug); | 
 |  1779     ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); | 
 |  1780     dc32(code); | 
 |  1781     ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); | 
 |  1782     dc32(params); | 
 |  1783     ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset); | 
 |  1784     EmitStringData(message); | 
 |  1785     hlt(kImmExceptionIsUnreachable); | 
 |  1786  | 
 |  1787     return; | 
 |  1788   } | 
 |  1789   // Fall through if Serializer is enabled. | 
 |  1790 #endif | 
 |  1791  | 
 |  1792   if (params & BREAK) { | 
 |  1793     hlt(kImmExceptionIsDebug); | 
 |  1794   } | 
 |  1795 } | 
 |  1796  | 
 |  1797  | 
 |  1798 void Assembler::Logical(const Register& rd, | 
 |  1799                         const Register& rn, | 
 |  1800                         const Operand& operand, | 
 |  1801                         LogicalOp op) { | 
 |  1802   ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 
 |  1803   ASSERT(!operand.NeedsRelocation()); | 
 |  1804   if (operand.IsImmediate()) { | 
 |  1805     int64_t immediate = operand.immediate(); | 
 |  1806     unsigned reg_size = rd.SizeInBits(); | 
 |  1807  | 
 |  1808     ASSERT(immediate != 0); | 
 |  1809     ASSERT(immediate != -1); | 
 |  1810     ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 
 |  1811  | 
 |  1812     // If the operation is NOT, invert the operation and immediate. | 
 |  1813     if ((op & NOT) == NOT) { | 
 |  1814       op = static_cast<LogicalOp>(op & ~NOT); | 
 |  1815       immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); | 
 |  1816     } | 
 |  1817  | 
 |  1818     unsigned n, imm_s, imm_r; | 
 |  1819     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | 
 |  1820       // Immediate can be encoded in the instruction. | 
 |  1821       LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 
 |  1822     } else { | 
 |  1823       // This case is handled in the macro assembler. | 
 |  1824       UNREACHABLE(); | 
 |  1825     } | 
 |  1826   } else { | 
 |  1827     ASSERT(operand.IsShiftedRegister()); | 
 |  1828     ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 
 |  1829     Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); | 
 |  1830     DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); | 
 |  1831   } | 
 |  1832 } | 
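 |       // Sketch of the NOT handling above (illustrative): A64 has no "bic" | 
 |       // with immediate, so a request like "bic x0, x1, #0xff" arrives with the | 
 |       // NOT bit set and is rewritten as an AND of the inverted immediate, | 
 |       // i.e. "and x0, x1, #0xffffffffffffff00". | 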
 |  1833  | 
 |  1834  | 
 |  1835 void Assembler::LogicalImmediate(const Register& rd, | 
 |  1836                                  const Register& rn, | 
 |  1837                                  unsigned n, | 
 |  1838                                  unsigned imm_s, | 
 |  1839                                  unsigned imm_r, | 
 |  1840                                  LogicalOp op) { | 
 |  1841   unsigned reg_size = rd.SizeInBits(); | 
 |  1842   Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd); | 
 |  1843   Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | | 
 |  1844        ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | | 
 |  1845        Rn(rn)); | 
 |  1846 } | 
 |  1847  | 
 |  1848  | 
 |  1849 void Assembler::ConditionalCompare(const Register& rn, | 
 |  1850                                    const Operand& operand, | 
 |  1851                                    StatusFlags nzcv, | 
 |  1852                                    Condition cond, | 
 |  1853                                    ConditionalCompareOp op) { | 
 |  1854   Instr ccmpop; | 
 |  1855   ASSERT(!operand.NeedsRelocation()); | 
 |  1856   if (operand.IsImmediate()) { | 
 |  1857     int64_t immediate = operand.immediate(); | 
 |  1858     ASSERT(IsImmConditionalCompare(immediate)); | 
 |  1859     ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); | 
 |  1860   } else { | 
 |  1861     ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); | 
 |  1862     ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); | 
 |  1863   } | 
 |  1864   Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); | 
 |  1865 } | 
 |  1866  | 
 |  1867  | 
 |  1868 void Assembler::DataProcessing1Source(const Register& rd, | 
 |  1869                                       const Register& rn, | 
 |  1870                                       DataProcessing1SourceOp op) { | 
 |  1871   ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 
 |  1872   Emit(SF(rn) | op | Rn(rn) | Rd(rd)); | 
 |  1873 } | 
 |  1874  | 
 |  1875  | 
 |  1876 void Assembler::FPDataProcessing1Source(const FPRegister& fd, | 
 |  1877                                         const FPRegister& fn, | 
 |  1878                                         FPDataProcessing1SourceOp op) { | 
 |  1879   Emit(FPType(fn) | op | Rn(fn) | Rd(fd)); | 
 |  1880 } | 
 |  1881  | 
 |  1882  | 
 |  1883 void Assembler::FPDataProcessing2Source(const FPRegister& fd, | 
 |  1884                                         const FPRegister& fn, | 
 |  1885                                         const FPRegister& fm, | 
 |  1886                                         FPDataProcessing2SourceOp op) { | 
 |  1887   ASSERT(fd.SizeInBits() == fn.SizeInBits()); | 
 |  1888   ASSERT(fd.SizeInBits() == fm.SizeInBits()); | 
 |  1889   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); | 
 |  1890 } | 
 |  1891  | 
 |  1892  | 
 |  1893 void Assembler::FPDataProcessing3Source(const FPRegister& fd, | 
 |  1894                                         const FPRegister& fn, | 
 |  1895                                         const FPRegister& fm, | 
 |  1896                                         const FPRegister& fa, | 
 |  1897                                         FPDataProcessing3SourceOp op) { | 
 |  1898   ASSERT(AreSameSizeAndType(fd, fn, fm, fa)); | 
 |  1899   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); | 
 |  1900 } | 
 |  1901  | 
 |  1902  | 
 |  1903 void Assembler::EmitShift(const Register& rd, | 
 |  1904                           const Register& rn, | 
 |  1905                           Shift shift, | 
 |  1906                           unsigned shift_amount) { | 
 |  1907   switch (shift) { | 
 |  1908     case LSL: | 
 |  1909       lsl(rd, rn, shift_amount); | 
 |  1910       break; | 
 |  1911     case LSR: | 
 |  1912       lsr(rd, rn, shift_amount); | 
 |  1913       break; | 
 |  1914     case ASR: | 
 |  1915       asr(rd, rn, shift_amount); | 
 |  1916       break; | 
 |  1917     case ROR: | 
 |  1918       ror(rd, rn, shift_amount); | 
 |  1919       break; | 
 |  1920     default: | 
 |  1921       UNREACHABLE(); | 
 |  1922   } | 
 |  1923 } | 
 |  1924  | 
 |  1925  | 
 |  1926 void Assembler::EmitExtendShift(const Register& rd, | 
 |  1927                                 const Register& rn, | 
 |  1928                                 Extend extend, | 
 |  1929                                 unsigned left_shift) { | 
 |  1930   ASSERT(rd.SizeInBits() >= rn.SizeInBits()); | 
 |  1931   unsigned reg_size = rd.SizeInBits(); | 
 |  1932   // Use the correct size of register. | 
 |  1933   Register rn_ = Register::Create(rn.code(), rd.SizeInBits()); | 
 |  1934   // Bits extracted are high_bit:0. | 
 |  1935   unsigned high_bit = (8 << (extend & 0x3)) - 1; | 
 |  1936   // Number of bits left in the result that are not introduced by the shift. | 
 |  1937   unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); | 
 |  1938  | 
 |  1939   if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { | 
 |  1940     switch (extend) { | 
 |  1941       case UXTB: | 
 |  1942       case UXTH: | 
 |  1943       case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; | 
 |  1944       case SXTB: | 
 |  1945       case SXTH: | 
 |  1946       case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; | 
 |  1947       case UXTX: | 
 |  1948       case SXTX: { | 
 |  1949         ASSERT(rn.SizeInBits() == kXRegSize); | 
 |  1950         // Nothing to extend. Just shift. | 
 |  1951         lsl(rd, rn_, left_shift); | 
 |  1952         break; | 
 |  1953       } | 
 |  1954       default: UNREACHABLE(); | 
 |  1955     } | 
 |  1956   } else { | 
 |  1957     // No need to extend as the extended bits would be shifted away. | 
 |  1958     lsl(rd, rn_, left_shift); | 
 |  1959   } | 
 |  1960 } | 
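 |       // Example (illustrative): for a 64-bit rd, UXTB with left_shift == 2 | 
 |       // gives high_bit = 7 and non_shift_bits = 62, so "ubfm rd, rn, #62, #7" | 
 |       // is emitted: a zero-extension of the low byte shifted left by 2. | 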
 |  1961  | 
 |  1962  | 
 |  1963 void Assembler::DataProcShiftedRegister(const Register& rd, | 
 |  1964                                         const Register& rn, | 
 |  1965                                         const Operand& operand, | 
 |  1966                                         FlagsUpdate S, | 
 |  1967                                         Instr op) { | 
 |  1968   ASSERT(operand.IsShiftedRegister()); | 
 |  1969   ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); | 
 |  1970   ASSERT(!operand.NeedsRelocation()); | 
 |  1971   Emit(SF(rd) | op | Flags(S) | | 
 |  1972        ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | | 
 |  1973        Rm(operand.reg()) | Rn(rn) | Rd(rd)); | 
 |  1974 } | 
 |  1975  | 
 |  1976  | 
 |  1977 void Assembler::DataProcExtendedRegister(const Register& rd, | 
 |  1978                                          const Register& rn, | 
 |  1979                                          const Operand& operand, | 
 |  1980                                          FlagsUpdate S, | 
 |  1981                                          Instr op) { | 
 |  1982   ASSERT(!operand.NeedsRelocation()); | 
 |  1983   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); | 
 |  1984   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | | 
 |  1985        ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | | 
 |  1986        dest_reg | RnSP(rn)); | 
 |  1987 } | 
 |  1988  | 
 |  1989  | 
 |  1990 bool Assembler::IsImmAddSub(int64_t immediate) { | 
 |  1991   return is_uint12(immediate) || | 
 |  1992          (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0)); | 
 |  1993 } | 
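 |       // Examples (illustrative): 0xfff and 0xfff000 are encodable add/sub | 
 |       // immediates (the unshifted and "LSL #12" forms respectively), while | 
 |       // 0x1001 is not, as it has bits on both sides of the 12-bit boundary. | 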
 |  1994  | 
 |  1995 void Assembler::LoadStore(const CPURegister& rt, | 
 |  1996                           const MemOperand& addr, | 
 |  1997                           LoadStoreOp op) { | 
 |  1998   Instr memop = op | Rt(rt) | RnSP(addr.base()); | 
 |  1999   ptrdiff_t offset = addr.offset(); | 
 |  2000  | 
 |  2001   if (addr.IsImmediateOffset()) { | 
 |  2002     LSDataSize size = CalcLSDataSize(op); | 
 |  2003     if (IsImmLSScaled(offset, size)) { | 
 |  2004       // Use the scaled addressing mode. | 
 |  2005       Emit(LoadStoreUnsignedOffsetFixed | memop | | 
 |  2006            ImmLSUnsigned(offset >> size)); | 
 |  2007     } else if (IsImmLSUnscaled(offset)) { | 
 |  2008       // Use the unscaled addressing mode. | 
 |  2009       Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); | 
 |  2010     } else { | 
 |  2011       // This case is handled in the macro assembler. | 
 |  2012       UNREACHABLE(); | 
 |  2013     } | 
 |  2014   } else if (addr.IsRegisterOffset()) { | 
 |  2015     Extend ext = addr.extend(); | 
 |  2016     Shift shift = addr.shift(); | 
 |  2017     unsigned shift_amount = addr.shift_amount(); | 
 |  2018  | 
 |  2019     // LSL is encoded in the option field as UXTX. | 
 |  2020     if (shift == LSL) { | 
 |  2021       ext = UXTX; | 
 |  2022     } | 
 |  2023  | 
 |  2024     // Shifts are encoded in one bit, indicating a left shift by the memory | 
 |  2025     // access size. | 
 |  2026     ASSERT((shift_amount == 0) || | 
 |  2027            (shift_amount == static_cast<unsigned>(CalcLSDataSize(op)))); | 
 |  2028     Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) | | 
 |  2029          ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0)); | 
 |  2030   } else { | 
 |  2031     // Pre-index and post-index modes. | 
 |  2032     ASSERT(!rt.Is(addr.base())); | 
 |  2033     if (IsImmLSUnscaled(offset)) { | 
 |  2034       if (addr.IsPreIndex()) { | 
 |  2035         Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); | 
 |  2036       } else { | 
 |  2037         ASSERT(addr.IsPostIndex()); | 
 |  2038         Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); | 
 |  2039       } | 
 |  2040     } else { | 
 |  2041       // This case is handled in the macro assembler. | 
 |  2042       UNREACHABLE(); | 
 |  2043     } | 
 |  2044   } | 
 |  2045 } | 
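 |       // Offset handling sketch (illustrative), for a 64-bit load (size == 3): | 
 |       //   offset 8     -> scaled unsigned form, ImmLSUnsigned(1); | 
 |       //   offset -8    -> unscaled form, ImmLS(-8); | 
 |       //   offset 32768 -> fits neither form; handled by the macro assembler. | 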
 |  2046  | 
 |  2047  | 
 |  2048 bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) { | 
 |  2049   return is_int9(offset); | 
 |  2050 } | 
 |  2051  | 
 |  2052  | 
 |  2053 bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) { | 
 |  2054   bool offset_is_size_multiple = (((offset >> size) << size) == offset); | 
 |  2055   return offset_is_size_multiple && is_uint12(offset >> size); | 
 |  2056 } | 
 |  2057  | 
 |  2058  | 
 |  2059 void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) { | 
 |  2060   ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0); | 
 |  2061   // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a | 
 |  2062   // constant pool. It should not be emitted. | 
 |  2063   ASSERT(!rt.Is(xzr)); | 
 |  2064   Emit(LDR_x_lit | | 
 |  2065        ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) | | 
 |  2066        Rt(rt)); | 
 |  2067 } | 
 |  2068  | 
 |  2069  | 
 |  2070 void Assembler::LoadRelocatedValue(const CPURegister& rt, | 
 |  2071                                    const Operand& operand, | 
 |  2072                                    LoadLiteralOp op) { | 
 |  2073   int64_t imm = operand.immediate(); | 
 |  2074   ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits())); | 
 |  2075   RecordRelocInfo(operand.rmode(), imm); | 
 |  2076   BlockConstPoolFor(1); | 
 |  2077   Emit(op | ImmLLiteral(0) | Rt(rt)); | 
 |  2078 } | 
 |  2079  | 
 |  2080  | 
 |  2081 // Test if a given value can be encoded in the immediate field of a logical | 
 |  2082 // instruction. | 
 |  2083 // If it can be encoded, the function returns true, and values pointed to by n, | 
 |  2084 // imm_s and imm_r are updated with immediates encoded in the format required | 
 |  2085 // by the corresponding fields in the logical instruction. | 
 |  2086 // If it can not be encoded, the function returns false, and the values pointed | 
 |  2087 // to by n, imm_s and imm_r are undefined. | 
 |  2088 bool Assembler::IsImmLogical(uint64_t value, | 
 |  2089                              unsigned width, | 
 |  2090                              unsigned* n, | 
 |  2091                              unsigned* imm_s, | 
 |  2092                              unsigned* imm_r) { | 
 |  2093   ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); | 
 |  2094   ASSERT((width == kWRegSize) || (width == kXRegSize)); | 
 |  2095  | 
 |  2096   // Logical immediates are encoded using parameters n, imm_s and imm_r using | 
 |  2097   // the following table: | 
 |  2098   // | 
 |  2099   //  N   imms    immr    size        S             R | 
 |  2100   //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr) | 
 |  2101   //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr) | 
 |  2102   //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr) | 
 |  2103   //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr) | 
 |  2104   //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr) | 
 |  2105   //  0  11110s  xxxxxr     2    UInt(s)       UInt(r) | 
 |  2106   // (s bits must not be all set) | 
 |  2107   // | 
 |  2108   // A pattern is constructed of size bits, where the least significant S+1 | 
 |  2109   // bits are set. The pattern is rotated right by R, and repeated across a | 
 |  2110   // 32- or 64-bit value, depending on the destination register width. | 
 |  2111   // | 
 |  2112   // To test if an arbitrary immediate can be encoded using this scheme, an | 
 |  2113   // iterative algorithm is used. | 
 |  2114   // | 
 |  2115   // TODO(mcapewel) This code does not consider using X/W register overlap to | 
 |  2116   // support 64-bit immediates where the top 32-bits are zero, and the bottom | 
 |  2117   // 32-bits are an encodable logical immediate. | 
 |  2118  | 
 |  2119   // 1. If the value has all set or all clear bits, it can't be encoded. | 
 |  2120   if ((value == 0) || (value == 0xffffffffffffffffUL) || | 
 |  2121       ((width == kWRegSize) && (value == 0xffffffff))) { | 
 |  2122     return false; | 
 |  2123   } | 
 |  2124  | 
 |  2125   unsigned lead_zero = CountLeadingZeros(value, width); | 
 |  2126   unsigned lead_one = CountLeadingZeros(~value, width); | 
 |  2127   unsigned trail_zero = CountTrailingZeros(value, width); | 
 |  2128   unsigned trail_one = CountTrailingZeros(~value, width); | 
 |  2129   unsigned set_bits = CountSetBits(value, width); | 
 |  2130  | 
 |  2131   // The fixed bits in the immediate s field. | 
 |  2132   // If width == 64 (X reg), start at 0xFFFFFF80. | 
 |  2133   // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit | 
 |  2134   // widths won't be executed. | 
 |  2135   int imm_s_fixed = (width == kXRegSize) ? -128 : -64; | 
 |  2136   int imm_s_mask = 0x3F; | 
 |  2137  | 
 |  2138   for (;;) { | 
 |  2139     // 2. If the value is two bits wide, it can be encoded. | 
 |  2140     if (width == 2) { | 
 |  2141       *n = 0; | 
 |  2142       *imm_s = 0x3C; | 
 |  2143       *imm_r = (value & 3) - 1; | 
 |  2144       return true; | 
 |  2145     } | 
 |  2146  | 
 |  2147     *n = (width == 64) ? 1 : 0; | 
 |  2148     *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask); | 
 |  2149     if ((lead_zero + set_bits) == width) { | 
 |  2150       *imm_r = 0; | 
 |  2151     } else { | 
 |  2152       *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one; | 
 |  2153     } | 
 |  2154  | 
 |  2155     // 3. If the sum of leading zeros, trailing zeros and set bits is equal to | 
 |  2156     //    the bit width of the value, it can be encoded. | 
 |  2157     if (lead_zero + trail_zero + set_bits == width) { | 
 |  2158       return true; | 
 |  2159     } | 
 |  2160  | 
 |  2161     // 4. If the sum of leading ones, trailing ones and unset bits in the | 
 |  2162     //    value is equal to the bit width of the value, it can be encoded. | 
 |  2163     if (lead_one + trail_one + (width - set_bits) == width) { | 
 |  2164       return true; | 
 |  2165     } | 
 |  2166  | 
 |  2167     // 5. If the most-significant half of the bitwise value is equal to the | 
 |  2168     //    least-significant half, return to step 2 using the least-significant | 
 |  2169     //    half of the value. | 
 |  2170     uint64_t mask = (1UL << (width >> 1)) - 1; | 
 |  2171     if ((value & mask) == ((value >> (width >> 1)) & mask)) { | 
 |  2172       width >>= 1; | 
 |  2173       set_bits >>= 1; | 
 |  2174       imm_s_fixed >>= 1; | 
 |  2175       continue; | 
 |  2176     } | 
 |  2177  | 
 |  2178     // 6. Otherwise, the value can't be encoded. | 
 |  2179     return false; | 
 |  2180   } | 
 |  2181 } | 
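 |       // Example (illustrative): 0x00ff00ff00ff00ff repeats a 16-bit pattern | 
 |       // whose low 8 bits are set, so the loop above halves the width down to | 
 |       // 16 and succeeds with n = 0, imm_s = 0x27 and imm_r = 0. | 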
 |  2182  | 
 |  2183  | 
 |  2184 bool Assembler::IsImmConditionalCompare(int64_t immediate) { | 
 |  2185   return is_uint5(immediate); | 
 |  2186 } | 
 |  2187  | 
 |  2188  | 
 |  2189 bool Assembler::IsImmFP32(float imm) { | 
 |  2190   // Valid values will have the form: | 
 |  2191   // aBbb.bbbc.defg.h000.0000.0000.0000.0000 | 
 |  2192   uint32_t bits = float_to_rawbits(imm); | 
 |  2193   // bits[18..0] are cleared. | 
 |  2194   if ((bits & 0x7ffff) != 0) { | 
 |  2195     return false; | 
 |  2196   } | 
 |  2197  | 
 |  2198   // bits[29..25] are all set or all cleared. | 
 |  2199   uint32_t b_pattern = (bits >> 16) & 0x3e00; | 
 |  2200   if (b_pattern != 0 && b_pattern != 0x3e00) { | 
 |  2201     return false; | 
 |  2202   } | 
 |  2203  | 
 |  2204   // bit[30] and bit[29] are opposite. | 
 |  2205   if (((bits ^ (bits << 1)) & 0x40000000) == 0) { | 
 |  2206     return false; | 
 |  2207   } | 
 |  2208  | 
 |  2209   return true; | 
 |  2210 } | 
 |  2211  | 
 |  2212  | 
 |  2213 bool Assembler::IsImmFP64(double imm) { | 
 |  2214   // Valid values will have the form: | 
 |  2215   // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 | 
 |  2216   // 0000.0000.0000.0000.0000.0000.0000.0000 | 
 |  2217   uint64_t bits = double_to_rawbits(imm); | 
 |  2218   // bits[47..0] are cleared. | 
 |  2219   if ((bits & 0xffffffffffffL) != 0) { | 
 |  2220     return false; | 
 |  2221   } | 
 |  2222  | 
 |  2223   // bits[61..54] are all set or all cleared. | 
 |  2224   uint32_t b_pattern = (bits >> 48) & 0x3fc0; | 
 |  2225   if (b_pattern != 0 && b_pattern != 0x3fc0) { | 
 |  2226     return false; | 
 |  2227   } | 
 |  2228  | 
 |  2229   // bit[62] and bit[61] are opposite. | 
 |  2230   if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) { | 
 |  2231     return false; | 
 |  2232   } | 
 |  2233  | 
 |  2234   return true; | 
 |  2235 } | 
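 |       // For illustration (not from the original source): values such as 1.0, | 
 |       // 0.5 and -3.0 pass these checks, while 0.0 fails the "bit[62] and | 
 |       // bit[61] are opposite" test; +/-0.0 is instead handled in fmov via the | 
 |       // zero register. | 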
 |  2236  | 
 |  2237  | 
 |  2238 void Assembler::GrowBuffer() { | 
 |  2239   if (!own_buffer_) FATAL("external code buffer is too small"); | 
 |  2240  | 
 |  2241   // Compute new buffer size. | 
 |  2242   CodeDesc desc;  // the new buffer | 
 |  2243   if (buffer_size_ < 4 * KB) { | 
 |  2244     desc.buffer_size = 4 * KB; | 
 |  2245   } else if (buffer_size_ < 1 * MB) { | 
 |  2246     desc.buffer_size = 2 * buffer_size_; | 
 |  2247   } else { | 
 |  2248     desc.buffer_size = buffer_size_ + 1 * MB; | 
 |  2249   } | 
 |  2250   CHECK_GT(desc.buffer_size, 0);  // No overflow. | 
 |  2251  | 
 |  2252   byte* buffer = reinterpret_cast<byte*>(buffer_); | 
 |  2253  | 
 |  2254   // Set up new buffer. | 
 |  2255   desc.buffer = NewArray<byte>(desc.buffer_size); | 
 |  2256  | 
 |  2257   desc.instr_size = pc_offset(); | 
 |  2258   desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos(); | 
 |  2259  | 
 |  2260   // Copy the data. | 
 |  2261   intptr_t pc_delta = desc.buffer - buffer; | 
 |  2262   intptr_t rc_delta = (desc.buffer + desc.buffer_size) - | 
 |  2263                       (buffer + buffer_size_); | 
 |  2264   memmove(desc.buffer, buffer, desc.instr_size); | 
 |  2265   memmove(reloc_info_writer.pos() + rc_delta, | 
 |  2266           reloc_info_writer.pos(), desc.reloc_size); | 
 |  2267  | 
 |  2268   // Switch buffers. | 
 |  2269   DeleteArray(buffer_); | 
 |  2270   buffer_ = desc.buffer; | 
 |  2271   buffer_size_ = desc.buffer_size; | 
 |  2272   pc_ = reinterpret_cast<byte*>(pc_) + pc_delta; | 
 |  2273   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 
 |  2274                                reloc_info_writer.last_pc() + pc_delta); | 
 |  2275  | 
 |  2276   // None of our relocation types are pc-relative pointing outside the code | 
 |  2277   // buffer or pc-absolute pointing inside the code buffer, so there is no need | 
 |  2278   // to relocate any emitted relocation entries. | 
 |  2279  | 
 |  2280   // Relocate pending relocation entries. | 
 |  2281   for (int i = 0; i < num_pending_reloc_info_; i++) { | 
 |  2282     RelocInfo& rinfo = pending_reloc_info_[i]; | 
 |  2283     ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 
 |  2284            rinfo.rmode() != RelocInfo::POSITION); | 
 |  2285     if (rinfo.rmode() != RelocInfo::JS_RETURN) { | 
 |  2286       rinfo.set_pc(rinfo.pc() + pc_delta); | 
 |  2287     } | 
 |  2288   } | 
 |  2289 } | 
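 |       // Growth policy sketch (illustrative): the buffer doubles while it is | 
 |       // under 1MB, then grows by 1MB at a time, e.g. 4KB -> 8KB -> ... -> | 
 |       // 512KB -> 1MB -> 2MB -> 3MB. | 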
 |  2290  | 
 |  2291  | 
 |  2292 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, int64_t data) { | 
 |  2293   // We do not try to reuse pool constants. | 
 |  2294   RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); | 
 |  2295   if (((rmode >= RelocInfo::JS_RETURN) && | 
 |  2296        (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || | 
 |  2297       (rmode == RelocInfo::CONST_POOL)) { | 
 |  2298     // Adjust code for new modes. | 
 |  2299     ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 
 |  2300            || RelocInfo::IsJSReturn(rmode) | 
 |  2301            || RelocInfo::IsComment(rmode) | 
 |  2302            || RelocInfo::IsPosition(rmode) | 
 |  2303            || RelocInfo::IsConstPool(rmode)); | 
 |  2304     // These modes do not need an entry in the constant pool. | 
 |  2305   } else { | 
 |  2306     ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); | 
 |  2307     if (num_pending_reloc_info_ == 0) { | 
 |  2308       first_const_pool_use_ = pc_offset(); | 
 |  2309     } | 
 |  2310     pending_reloc_info_[num_pending_reloc_info_++] = rinfo; | 
 |  2311     // Make sure the constant pool is not emitted in place of the next | 
 |  2312     // instruction for which we just recorded relocation info. | 
 |  2313     BlockConstPoolFor(1); | 
 |  2314   } | 
 |  2315  | 
 |  2316   if (!RelocInfo::IsNone(rmode)) { | 
 |  2317     // Don't record external references unless the heap will be serialized. | 
 |  2318     if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | 
 |  2319 #ifdef DEBUG | 
 |  2320       if (!Serializer::enabled()) { | 
 |  2321         Serializer::TooLateToEnableNow(); | 
 |  2322       } | 
 |  2323 #endif | 
 |  2324       if (!Serializer::enabled() && !emit_debug_code()) { | 
 |  2325         return; | 
 |  2326       } | 
 |  2327     } | 
 |  2328     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here | 
 |  2329     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { | 
 |  2330       RelocInfo reloc_info_with_ast_id( | 
 |  2331           reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); | 
 |  2332       ClearRecordedAstId(); | 
 |  2333       reloc_info_writer.Write(&reloc_info_with_ast_id); | 
 |  2334     } else { | 
 |  2335       reloc_info_writer.Write(&rinfo); | 
 |  2336     } | 
 |  2337   } | 
 |  2338 } | 
 |  2339  | 
 |  2340  | 
 |  2341 void Assembler::BlockConstPoolFor(int instructions) { | 
 |  2342   int pc_limit = pc_offset() + instructions * kInstructionSize; | 
 |  2343   if (no_const_pool_before_ < pc_limit) { | 
 |  2344     // If there are some pending entries, the constant pool cannot be blocked | 
 |  2345     // further than first_const_pool_use_ + kMaxDistToPool. | 
 |  2346     ASSERT((num_pending_reloc_info_ == 0) || | 
 |  2347            (pc_limit < (first_const_pool_use_ + kMaxDistToPool))); | 
 |  2348     no_const_pool_before_ = pc_limit; | 
 |  2349   } | 
 |  2350  | 
 |  2351   if (next_buffer_check_ < no_const_pool_before_) { | 
 |  2352     next_buffer_check_ = no_const_pool_before_; | 
 |  2353   } | 
 |  2354 } | 
 |  2355  | 
 |  2356  | 
 |  2357 // TODO(all): We never try to emit constant pools after unconditional | 
 |  2358 // branches, because this is only called from Assembler::Emit() (or manually). | 
 |  2359 // We should try to enable that. | 
 |  2360 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { | 
 |  2361   // Some short sequences of instructions must not be broken up by constant | 
 |  2362   // pool emission; such sequences are protected by calls to BlockConstPoolFor | 
 |  2363   // and BlockConstPoolScope. | 
 |  2364   if (is_const_pool_blocked()) { | 
 |  2365     // Something is wrong if emission is forced and blocked at the same time. | 
 |  2366     ASSERT(!force_emit); | 
 |  2367     return; | 
 |  2368   } | 
 |  2369  | 
 |  2370   // There is nothing to do if there are no pending constant pool entries. | 
 |  2371   if (num_pending_reloc_info_ == 0) { | 
 |  2372     // Calculate the offset of the next check. | 
 |  2373     next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 
 |  2374     return; | 
 |  2375   } | 
 |  2376  | 
 |  2377   // We emit a constant pool when: | 
 |  2378   //  * requested to do so by parameter force_emit (e.g. after each function). | 
 |  2379   //  * the distance to the first instruction accessing the constant pool is | 
 |  2380   //    kAvgDistToPool or more. | 
 |  2381   //  * no jump is required and the distance to the first instruction accessing | 
 |  2382   //    the constant pool is at least kMaxDistToPool / 2. | 
 |  2383   ASSERT(first_const_pool_use_ >= 0); | 
 |  2384   int dist = pc_offset() - first_const_pool_use_; | 
 |  2385   if (!force_emit && dist < kAvgDistToPool && | 
 |  2386       (require_jump || (dist < (kMaxDistToPool / 2)))) { | 
 |  2387     return; | 
 |  2388   } | 
 |  2389  | 
 |  2390   // Check that the code buffer is large enough before emitting the constant | 
 |  2391   // pool (this includes the jump over the pool, the constant pool marker and | 
 |  2392   // the gap to the relocation information). | 
 |  2393   int jump_instr = require_jump ? kInstructionSize : 0; | 
 |  2394   int size = jump_instr + kInstructionSize + | 
 |  2395              num_pending_reloc_info_ * kPointerSize; | 
 |  2396   int needed_space = size + kGap; | 
 |  2397   while (buffer_space() <= needed_space) { | 
 |  2398     GrowBuffer(); | 
 |  2399   } | 
 |  2400  | 
 |  2401   { | 
 |  2402     // Block recursive calls to CheckConstPool. | 
 |  2403     BlockConstPoolScope block_const_pool(this); | 
 |  2404     RecordComment("[ Constant Pool"); | 
 |  2405     RecordConstPool(size); | 
 |  2406  | 
 |  2407     // Emit jump over constant pool if necessary. | 
 |  2408     Label after_pool; | 
 |  2409     if (require_jump) { | 
 |  2410       b(&after_pool); | 
 |  2411     } | 
 |  2412  | 
 |  2413     // Emit a constant pool header. The header has two goals: | 
 |  2414     //  1) Encode the size of the constant pool, for use by the disassembler. | 
 |  2415     //  2) Terminate the program, to try to prevent execution from accidentally | 
 |  2416     //     flowing into the constant pool. | 
 |  2417     // The header is therefore made of two a64 instructions: | 
 |  2418     //   ldr xzr, #<size of the constant pool in 32-bit words> | 
 |  2419     //   blr xzr | 
 |  2420     // If executed, the code will likely segfault and lr will point to the | 
 |  2421     // beginning of the constant pool. | 
 |  2422     // TODO(all): currently each relocated constant is 64 bits, consider adding | 
 |  2423     // support for 32-bit entries. | 
 |  2424     ConstantPoolMarker(2 * num_pending_reloc_info_); | 
 |  2425     ConstantPoolGuard(); | 
 |  2426  | 
 |  2427     // Emit constant pool entries. | 
 |  2428     for (int i = 0; i < num_pending_reloc_info_; i++) { | 
 |  2429       RelocInfo& rinfo = pending_reloc_info_[i]; | 
 |  2430       ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 
 |  2431              rinfo.rmode() != RelocInfo::POSITION && | 
 |  2432              rinfo.rmode() != RelocInfo::STATEMENT_POSITION && | 
 |  2433              rinfo.rmode() != RelocInfo::CONST_POOL); | 
 |  2434  | 
 |  2435       Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); | 
 |  2436       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | 
 |  2437       ASSERT(instr->IsLdrLiteral() && | 
 |  2438              instr->ImmLLiteral() == 0); | 
 |  2439  | 
 |  2440       instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); | 
 |  2441       dc64(rinfo.data()); | 
 |  2442     } | 
 |  2443  | 
 |  2444     num_pending_reloc_info_ = 0; | 
 |  2445     first_const_pool_use_ = -1; | 
 |  2446  | 
 |  2447     RecordComment("]"); | 
 |  2448  | 
 |  2449     if (after_pool.is_linked()) { | 
 |  2450       bind(&after_pool); | 
 |  2451     } | 
 |  2452   } | 
 |  2453  | 
 |  2454   // Since a constant pool was just emitted, move the check offset forward by | 
 |  2455   // the standard interval. | 
 |  2456   next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 
 |  2457 } | 
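 |       // Emitted layout sketch (illustrative), for two pending 64-bit entries | 
 |       // with require_jump set: | 
 |       //   b after_pool | 
 |       //   ldr xzr, #4      ; marker: pool size in 32-bit words | 
 |       //   blr xzr          ; guard | 
 |       //   .quad <entry 0> | 
 |       //   .quad <entry 1> | 
 |       // after_pool: | 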
 |  2458  | 
 |  2459  | 
 |  2460 void Assembler::RecordComment(const char* msg) { | 
 |  2461   if (FLAG_code_comments) { | 
 |  2462     CheckBuffer(); | 
 |  2463     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 
 |  2464   } | 
 |  2465 } | 
 |  2466  | 
 |  2467  | 
 |  2468 int Assembler::buffer_space() const { | 
 |  2469   return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_); | 
 |  2470 } | 
 |  2471  | 
 |  2472  | 
 |  2473 void Assembler::RecordJSReturn() { | 
 |  2474   positions_recorder()->WriteRecordedPositions(); | 
 |  2475   CheckBuffer(); | 
 |  2476   RecordRelocInfo(RelocInfo::JS_RETURN); | 
 |  2477 } | 
 |  2478  | 
 |  2479  | 
 |  2480 void Assembler::RecordDebugBreakSlot() { | 
 |  2481   positions_recorder()->WriteRecordedPositions(); | 
 |  2482   CheckBuffer(); | 
 |  2483   RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); | 
 |  2484 } | 
 |  2485  | 
 |  2486  | 
 |  2487 void Assembler::RecordConstPool(int size) { | 
 |  2488   // We only need this for debugger support, to correctly compute offsets in the | 
 |  2489   // code. | 
 |  2490 #ifdef ENABLE_DEBUGGER_SUPPORT | 
 |  2491   RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); | 
 |  2492 #endif | 
 |  2493 } | 
 |  2494  | 
 |  2495  | 
 |  2496 } }  // namespace v8::internal | 
 |  2497  | 
 |  2498 #endif  // V8_TARGET_ARCH_A64 | 