// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_CODE_STUBS_A64_H_
#define V8_A64_CODE_STUBS_A64_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);


class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) { }

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};

class StringHelper : public AllStatic {
 public:
  // TODO(all): These don't seem to be used any more. Delete them.

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash,
                                  Register scratch);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};

class RecordWriteStub: public PlatformCodeStub {
 public:
  // Stub to record the write of 'value' at 'address' in 'object'.
  // Typically 'address' = 'object' + <some offset>.
  // See MacroAssembler::RecordWriteField() for an example.
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static Mode GetMode(Code* stub) {
    // Determine the mode from the first two instructions of the stub.
    Instruction* instr1 =
        reinterpret_cast<Instruction*>(stub->instruction_start());
    Instruction* instr2 = instr1->following();

    if (instr1->IsUncondBranchImm()) {
      ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
      return INCREMENTAL;
    }

    ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));

    if (instr2->IsUncondBranchImm()) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(instr2->IsPCRelAddressing());

    return STORE_BUFFER_ONLY;
  }

  // We patch the first two instructions of the stub back and forth between an
  // adr and a branch when we start and stop incremental heap marking.
  // The branch is
  //   b label
  // The adr is
  //   adr xzr label
  // so it is effectively a nop.
  static void Patch(Code* stub, Mode mode) {
    // We are going to patch the first two instructions of the stub.
    PatchingAssembler patcher(
        reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
    Instruction* instr1 = patcher.InstructionAt(0);
    Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
    // Instructions must be either 'adr' or 'b'.
    ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
    ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
    // Retrieve the offsets to the labels.
    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();

    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
        break;
    }
    ASSERT(GetMode(stub) == mode);
  }
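
  // For reference, a sketch of the three patchable states produced by the
  // switch above, which is also what GetMode() decodes. The label names are
  // illustrative only, not actual symbols:
  //   STORE_BUFFER_ONLY:       adr xzr, incremental            (nop)
  //                            adr xzr, incremental_compacting (nop)
  //   INCREMENTAL:             b   incremental
  //                            adr xzr, incremental_compacting (nop)
  //   INCREMENTAL_COMPACTION:  adr xzr, incremental            (nop)
  //                            b   incremental_compacting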

 private:
  // This is a helper class to manage the registers associated with the stub.
  // The 'object' and 'address' registers must be preserved.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch)
        : object_(object),
          address_(address),
          scratch0_(scratch),
          saved_regs_(kCallerSaved) {
      ASSERT(!AreAliased(scratch, object, address));

      // We would like to require more scratch registers for this stub,
      // but the number of registers comes down to the ones used in
      // FullCodeGen::SetVar(), which is architecture independent.
      // We allocate 2 extra scratch registers that we'll save on the stack.
      CPURegList pool_available = GetValidRegistersForAllocation();
      CPURegList used_regs(object, address, scratch);
      pool_available.Remove(used_regs);
      scratch1_ = Register(pool_available.PopLowestIndex());
      scratch2_ = Register(pool_available.PopLowestIndex());

      // The SaveCallerSaveRegisters method needs to save caller-saved
      // registers, but we don't bother saving ip0 and ip1 because they are
      // used as scratch registers by the MacroAssembler.
      saved_regs_.Remove(ip0);
      saved_regs_.Remove(ip1);

      // The scratch registers will be restored by other means, so we don't
      // need to save them with the other caller-saved registers.
      saved_regs_.Remove(scratch0_);
      saved_regs_.Remove(scratch1_);
      saved_regs_.Remove(scratch2_);
    }

    void Save(MacroAssembler* masm) {
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->Push(scratch1_, scratch2_);
    }

    void Restore(MacroAssembler* masm) {
      masm->Pop(scratch2_, scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      masm->PushCPURegList(saved_regs_);
      if (mode == kSaveFPRegs) {
        masm->PushCPURegList(kCallerSavedFP);
      }
    }

    void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      if (mode == kSaveFPRegs) {
        masm->PopCPURegList(kCallerSavedFP);
      }
      masm->PopCPURegList(saved_regs_);
    }

    Register object() { return object_; }
    Register address() { return address_; }
    Register scratch0() { return scratch0_; }
    Register scratch1() { return scratch1_; }
    Register scratch2() { return scratch2_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    Register scratch2_;
    CPURegList saved_regs_;

    // TODO(all): We should consider moving this somewhere else.
    static CPURegList GetValidRegistersForAllocation() {
      // The list of valid registers for allocation is defined as all the
      // registers without those with a special meaning.
      //
      // The default list excludes registers x26 to x31 because they are
      // reserved for the following purposes:
      //   - x26: root register
      //   - x27: context pointer register
      //   - x28: jssp
      //   - x29: frame pointer
      //   - x30: link register (lr)
      //   - x31: xzr / stack pointer
      CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);

      // We also remove MacroAssembler's scratch registers.
      list.Remove(ip0);
      list.Remove(ip1);
      list.Remove(x8);
      list.Remove(x9);

      return list;
    }
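
    // Derived note (the code above is authoritative): assuming ip0 and ip1
    // alias x16 and x17, the removals leave an allocation pool of
    // {x0-x7, x10-x15, x18-x25}.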

    friend class RecordWriteStub;
  };

  // A list of stub variants which are pregenerated.
  // The variants are stored in the same format as the minor key, so
  // MinorKeyFor() can be used to populate and check this list.
  static const int kAheadOfTime[];

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return MinorKeyFor(object_, value_, address_, remembered_set_action_,
                       save_fp_regs_mode_);
  }

  static int MinorKeyFor(Register object,
                         Register value,
                         Register address,
                         RememberedSetAction action,
                         SaveFPRegsMode fp_mode) {
    ASSERT(object.Is64Bits());
    ASSERT(value.Is64Bits());
    ASSERT(address.Is64Bits());
    return ObjectBits::encode(object.code()) |
        ValueBits::encode(value.code()) |
        AddressBits::encode(address.code()) |
        RememberedSetActionBits::encode(action) |
        SaveFPRegsModeBits::encode(fp_mode);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
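
  // Summary of the minor key layout encoded by the BitField classes above
  // (the declarations are authoritative):
  //   bits 0-4:   object register code
  //   bits 5-9:   value register code
  //   bits 10-14: address register code
  //   bit  15:    remembered set action
  //   bit  16:    save FP registers mode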

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};
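
// A sketch of the intended calling pattern (illustrative only; 'target' and
// the elided frame setup arguments are placeholders, not a verbatim
// sequence):
//   masm->EnterExitFrame(...);        // The caller prepares the exit frame.
//   DirectCEntryStub stub;
//   stub.GenerateCall(masm, target);  // Calls the C++ function in 'target'.
//   masm->LeaveExitFrame(...);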


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register scratch1,
                                     Register scratch2);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns the result in x0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns the result
  // in x0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* chars_not_equal);
};


struct PlatformCallInterfaceDescriptor {
  explicit PlatformCallInterfaceDescriptor(
      TargetAddressStorageMode storage_mode)
      : storage_mode_(storage_mode) { }

  TargetAddressStorageMode storage_mode() { return storage_mode_; }

 private:
  TargetAddressStorageMode storage_mode_;
};


} }  // namespace v8::internal

#endif  // V8_A64_CODE_STUBS_A64_H_