// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
#define V8_A64_MACRO_ASSEMBLER_A64_H_

#include <vector>

#include "v8globals.h"
#include "globals.h"

#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)


// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
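
// For illustration (not part of the original header): a typical call site
// builds an operand for a tagged in-object field, e.g. an object's map.
// FieldMemOperand accounts for the heap-object tag in the offset so the
// load addresses the field directly:
//
//   __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));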


// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        InvertCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
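
// For illustration (not part of the original header), inversion keeps the
// condition codes and the register-based types consistent:
//
//   InvertBranchType(integer_eq);  // == integer_ne (via InvertCondition).
//   InvertBranchType(reg_zero);    // == reg_not_zero (via the XOR with 1).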

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
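
  // For illustration (not part of the original header), each LS_MACRO_LIST
  // entry expands to one declaration; the Ldrb entry, for example, becomes:
  //
  //   inline void Ldrb(const Register& rt, const MemOperand& addr);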

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Adr(const Register& rd, Label* label);
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
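
  // For illustration (not part of the original header): depending on 'type',
  // the macro above emits a conditional branch, an unconditional branch,
  // cbz/cbnz (using 'reg'), or tbz/tbnz (using 'reg' and 'bit'), e.g.:
  //
  //   __ B(&done, reg_zero, x0);         // cbz  x0, done
  //   __ B(&done, reg_bit_set, x0, 63);  // tbnz x0, #63, done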

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  inline void Ldp(const CPURegister& rt,
                  const CPURegister& rt2,
                  const MemOperand& src);
  inline void Ldpsw(const Register& rt,
                    const Register& rt2,
                    const MemOperand& src);
  inline void Ldr(const FPRegister& ft, double imm);
  inline void Ldr(const Register& rt, uint64_t imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Stp(const CPURegister& rt,
                  const CPURegister& rt2,
                  const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  // Scratch registers such as Tmp0() and Tmp1() are preserved.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
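
  // For illustration (not part of the original header), the ordering rule
  // above makes save/restore sequences symmetric:
  //
  //   __ Push(x2, x1, x0);  // Same as Push(x2); Push(x1); Push(x0);
  //   ...
  //   __ Pop(x0, x1, x2);   // Restores x0, x1 and x2 to their old values.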

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSize);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSize);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSize);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSize);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
  }
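
  // For illustration (not part of the original header; Bit() is assumed from
  // this port's CPURegister interface): a RegList is a bit mask of register
  // codes, so a group of X registers can be saved and restored in one call:
  //
  //   __ PushXRegList(x0.Bit() | x1.Bit() | lr.Bit());
  //   ...
  //   __ PopXRegList(x0.Bit() | x1.Bit() | lr.Bit());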

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets
  // of register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      ASSERT(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    void PushQueued();
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
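
  // For illustration (not part of the original header), pushes of mixed
  // sizes and types can be queued and emitted together; the destructor
  // asserts that the queue was flushed:
  //
  //   PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);
  //   queue.Queue(w1);
  //   queue.PushQueued();  // Emit one efficient push sequence.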

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
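
  // For illustration (not part of the original header), Poke and Peek access
  // stack slots without moving the stack pointer:
  //
  //   __ Claim(2);                    // Reserve two X-register slots.
  //   __ Poke(x0, 0);                 // Store x0 at the stack pointer.
  //   __ Poke(x1, kXRegSizeInBytes);  // Store x1 in the next slot up.
  //   __ Peek(x2, 0);                 // x2 = x0.
  //   __ Drop(2);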

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSizeInBytes);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSizeInBytes);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSizeInBytes);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSizeInBytes);
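
  // For illustration (not part of the original header): with the default
  // 8-byte X-register unit, Claim(3) reserves 24 bytes. Since 24 is not a
  // multiple of 16, that call is only legal when the current stack pointer
  // is not csp; Claim(4) (32 bytes) would be acceptable either way.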

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
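
  // For illustration (not part of the original header), a common use is a
  // smi check: smis have a clear tag bit, so the two branches split the
  // cases cleanly:
  //
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
  //   __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);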

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two-character
  // string; it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If emit_debug_code() is false, this emits no code.
  //
  // If StackPointer() is the system stack pointer, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }
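
  // For illustration (not part of the original header): code that runs on a
  // separate JavaScript stack selects that stack first, then uses the Push
  // and Pop families as usual ('jssp' is this port's JS stack pointer):
  //
  //   __ SetStackPointer(jssp);
  //   __ Push(x0, x1);  // Pushes via jssp; csp is kept below it by Push.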

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    ASSERT(sp_alignment >= 16);
    ASSERT(IsPowerOf2(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  //
  // TODO(jbramley): Currently, this method can only accept values of 'space'
  // that can be encoded in one instruction. Refer to the implementation for
  // details.
  inline void BumpSystemStackPointer(const Operand& space);

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      ASSERT(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  template<typename Field>
  void DecodeField(Register reg) {
    static const uint64_t shift = Field::kShift + kSmiShift;
    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
    Ubfx(reg, reg, shift, setbits);
  }
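
  // For illustration (not part of the original header): DecodeField extracts
  // a BitField from a smi-tagged word (note the added kSmiShift), so a
  // smi-held field such as a map's enum length decodes in place, e.g.:
  //
  //   __ DecodeField<Map::EnumLengthBits>(x3);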

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);
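
  // For illustration (not part of the original header; assumes this port's
  // kSmiShift of 32): a smi carries its 32-bit payload in the upper word,
  // so tagging and untagging are single shifts:
  //
  //   __ SmiTag(x0, x1);    // x0 = x1 << kSmiShift
  //   __ SmiUntag(x1, x0);  // x1 = x0 >> kSmiShift (arithmetic shift)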

  // Compute the absolute value of 'smi' and leave the result in 'smi'
  // register. If 'smi' is the most negative SMI, the absolute value cannot
  // be represented as a SMI and a jump to 'slow' is done.
  void SmiAbs(const Register& smi, Label* slow);

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpForHeapNumber(Register object,
                         Register heap_number_map,
                         Label* on_heap_number,
                         Label* on_not_heap_number = NULL);
  void JumpIfHeapNumber(Register object,
                        Label* on_heap_number,
                        Register heap_number_map = NoReg);
  void JumpIfNotHeapNumber(Register object,
                           Label* on_not_heap_number,
                           Register heap_number_map = NoReg);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache, the generated code falls
  // through with the result in the result register. The object and the result
  // register can be the same. If the number is not found in the cache, the
  // code jumps to the label not_found with only the content of register object
  // unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to convert a double to a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are converted to 0 and handled as a success.
  void TryConvertDoubleToInt32(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion,
                               Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is32Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }

  // Try to convert a double to a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are converted to 0 and handled as a success.
  void TryConvertDoubleToInt64(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion,
                               Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is64Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }

  // ---- Object Utilities ----

  // Copy fields from 'src' to 'dst', where both are tagged objects.
  // The 'temps' list is a list of X registers which can be used for scratch
  // values. The temps list must include at least one register, and it must not
  // contain Tmp0() or Tmp1().
  //
  // Currently, CopyFields cannot make use of more than three registers from
  // the 'temps' list.
  //
  // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
  void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit, src and dst will point to the place just after where
  // the last byte was read or written, and length will be zero. Hint may be
  // used to determine which is the most efficient algorithm to use for
  // copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // Initialize fields with filler values. Fields starting at start_offset, not
  // including end_offset, are overwritten with the value in filler. At the end
  // of the loop, start_offset takes the value of end_offset.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---- String Utilities ----


  // Jump to label if either object is not a sequential ASCII string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialAsciiStrings(
      Register first,
      Register second,
      Register scratch1,
      Register scratch2,
      Label* failure,
      SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential ASCII string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                              Register scratch,
                                              Label* failure);

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  void JumpIfNotUniqueName(Register type, Label* not_unique_name);
  // ---- Calling / Jumping helpers ----

  // This is required for compatibility with architecture independent code.
  inline void jmp(Label* L) { B(L); }

  // Passes thrown value to the handler at the top of the try handler chain.
  // Register value must be x0.
  void Throw(Register value,
             Register scratch1,
             Register scratch2,
             Register scratch3,
             Register scratch4);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain. Register value must be x0.
  void ThrowUncatchable(Register value,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Register scratch4);

  // Throw a message string as an exception.
  void Throw(BailoutReason reason);

  // Throw a message string as an exception if a condition is not true.
  void ThrowIf(Condition cc, BailoutReason reason);

  // Throw a message string as an exception if the value is a smi.
  void ThrowIfSmi(const Register& value, BailoutReason reason);

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // TODO(all): Why does this variant save FP regs unconditionally?
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions.
  // 'stack_space' is the space to be unwound on exit (includes the call JS
  // arguments space and the additional space allocated for the fast call).
  // 'spill_offset' is the offset from the stack pointer where
  // CallApiFunctionAndReturn can spill registers.
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                int stack_space,
                                int spill_offset,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);

  // The number of registers that CallApiFunctionAndReturn will need to save on
  // the stack. The space for these registers needs to be allocated in the
  // ExitFrame before calling CallApiFunctionAndReturn.
  static const int kCallApiFunctionSpillSpace = 4;

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);
  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);


  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in x1.
  // TODO(all): Can we use another register than x1?
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode);
  void Jump(intptr_t target, RelocInfo::Mode rmode);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before
  // invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

| 1180 // ---- Floating point helpers ---- | |
| 1181 | |
| 1182 // Perform a conversion from a double to a signed int64. If the input fits in | |
| 1183 // range of the 64-bit result, execution branches to done. Otherwise, | |
| 1184 // execution falls through, and the sign of the result can be used to | |
| 1185 // determine if overflow was towards positive or negative infinity. | |
| 1186 // | |
| 1187 // On successful conversion, the least significant 32 bits of the result are | |
| 1188 // equivalent to the ECMA-262 operation "ToInt32". | |
| 1189 // | |
| 1190 // Only public for the test code in test-code-stubs-a64.cc. | |
| 1191 void TryConvertDoubleToInt64(Register result, | |
| 1192 DoubleRegister input, | |
| 1193 Label* done); | |
| 1194 | |
| 1195 // Performs a truncating conversion of a floating point number as used by | |
| 1196 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. | |
| 1197 // Exits with 'result' holding the answer. | |
| 1198 void TruncateDoubleToI(Register result, DoubleRegister double_input); | |
| 1199 | |
| 1200 // Performs a truncating conversion of a heap number as used by | |
| 1201 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' | |
| 1202 // must be different registers. Exits with 'result' holding the answer. | |
| 1203 void TruncateHeapNumberToI(Register result, Register object); | |
| 1204 | |
| 1205 // Converts the smi or heap number in object to an int32 using the rules | |
| 1206 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated | |
| 1207 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be | |
| 1208 // different registers. | |
| 1209 void TruncateNumberToI(Register object, | |
| 1210 Register result, | |
| 1211 Register heap_number_map, | |
| 1212 Label* not_int32); | |
| 1213 | |
| 1214 // ---- Code generation helpers ---- | |
| 1215 | |
| 1216 void set_generating_stub(bool value) { generating_stub_ = value; } | |
| 1217 bool generating_stub() const { return generating_stub_; } | |
| 1218 #if DEBUG | |
| 1219 void set_allow_macro_instructions(bool value) { | |
| 1220 allow_macro_instructions_ = value; | |
| 1221 } | |
| 1222 bool allow_macro_instructions() const { return allow_macro_instructions_; } | |
| 1223 #endif | |
| 1224 bool use_real_aborts() const { return use_real_aborts_; } | |
| 1225 void set_has_frame(bool value) { has_frame_ = value; } | |
| 1226 bool has_frame() const { return has_frame_; } | |
| 1227 bool AllowThisStubCall(CodeStub* stub); | |
| 1228 | |
| 1229 class NoUseRealAbortsScope { | |
| 1230 public: | |
| 1231 explicit NoUseRealAbortsScope(MacroAssembler* masm) : | |
| 1232 saved_(masm->use_real_aborts_), masm_(masm) { | |
| 1233 masm_->use_real_aborts_ = false; | |
| 1234 } | |
| 1235 ~NoUseRealAbortsScope() { | |
| 1236 masm_->use_real_aborts_ = saved_; | |
| 1237 } | |
| 1238 private: | |
| 1239 bool saved_; | |
| 1240 MacroAssembler* masm_; | |
| 1241 }; | |
| 1242 | |
| 1243 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 1244 // --------------------------------------------------------------------------- | |
| 1245 // Debugger Support | |
| 1246 | |
| 1247 void DebugBreak(); | |
| 1248 #endif | |
| 1249 // --------------------------------------------------------------------------- | |
| 1250 // Exception handling | |
| 1251 | |
| 1252 // Push a new try handler and link it into the try handler chain. | |
| 1253 void PushTryHandler(StackHandler::Kind kind, int handler_index); | |
| 1254 | |
| 1255 // Unlink the stack handler on top of the stack from the try handler chain. | |
| 1256 // Must preserve the result register. | |
| 1257 void PopTryHandler(); | |
| 1258 | |
| 1259 | |
| 1260 // --------------------------------------------------------------------------- | |
| 1261 // Allocation support | |
| 1262 | |
| 1263 // Allocate an object in new space or old pointer space. The object_size is | |
| 1264 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS | |
| 1265 // is passed. The allocated object is returned in result. | |
| 1266 // | |
| 1267 // If the new space is exhausted, control continues at the gc_required label. | |
| 1268 // In this case, the result and scratch registers may still be clobbered. | |
| 1269 // If flags includes TAG_OBJECT, the result is tagged as a heap object. | |
| 1270 void Allocate(Register object_size, | |
| 1271 Register result, | |
| 1272 Register scratch1, | |
| 1273 Register scratch2, | |
| 1274 Label* gc_required, | |
| 1275 AllocationFlags flags); | |
| 1276 | |
| 1277 void Allocate(int object_size, | |
| 1278 Register result, | |
| 1279 Register scratch1, | |
| 1280 Register scratch2, | |
| 1281 Label* gc_required, | |
| 1282 AllocationFlags flags); | |
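| | |
| // A usage sketch (illustrative only; register choices are arbitrary): | |
| // allocate a fixed-size, tagged object, bailing out when new space is full. | |
| // | |
| //   Label gc_required; | |
| //   masm->Allocate(JSObject::kHeaderSize, x0, x1, x2, &gc_required, | |
| //                  TAG_OBJECT); | |
| //   // On fall-through, x0 holds the tagged object; x1 and x2 are clobbered. | |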
| 1283 | |
| 1284 // Undo allocation in new space. The object passed and objects allocated after | |
| 1285 // it will no longer be allocated. The caller must make sure that no pointers | |
| 1286 // are left to the deallocated objects, as they would be invalid once the | |
| 1287 // allocation is undone. | |
| 1288 void UndoAllocationInNewSpace(Register object, Register scratch); | |
| 1289 | |
| 1290 void AllocateTwoByteString(Register result, | |
| 1291 Register length, | |
| 1292 Register scratch1, | |
| 1293 Register scratch2, | |
| 1294 Register scratch3, | |
| 1295 Label* gc_required); | |
| 1296 void AllocateAsciiString(Register result, | |
| 1297 Register length, | |
| 1298 Register scratch1, | |
| 1299 Register scratch2, | |
| 1300 Register scratch3, | |
| 1301 Label* gc_required); | |
| 1302 void AllocateTwoByteConsString(Register result, | |
| 1303 Register length, | |
| 1304 Register scratch1, | |
| 1305 Register scratch2, | |
| 1306 Label* gc_required); | |
| 1307 void AllocateAsciiConsString(Register result, | |
| 1308 Register length, | |
| 1309 Register scratch1, | |
| 1310 Register scratch2, | |
| 1311 Label* gc_required); | |
| 1312 void AllocateTwoByteSlicedString(Register result, | |
| 1313 Register length, | |
| 1314 Register scratch1, | |
| 1315 Register scratch2, | |
| 1316 Label* gc_required); | |
| 1317 void AllocateAsciiSlicedString(Register result, | |
| 1318 Register length, | |
| 1319 Register scratch1, | |
| 1320 Register scratch2, | |
| 1321 Label* gc_required); | |
| 1322 | |
| 1323 // Allocates a heap number or jumps to the gc_required label if the young | |
| 1324 // space is full and a scavenge is needed. | |
| 1325 // All registers are clobbered. | |
| 1326 // If no heap_number_map register is provided, the function will take care of | |
| 1327 // loading it. | |
| 1328 void AllocateHeapNumber(Register result, | |
| 1329 Label* gc_required, | |
| 1330 Register scratch1, | |
| 1331 Register scratch2, | |
| 1332 Register heap_number_map = NoReg); | |
| 1333 void AllocateHeapNumberWithValue(Register result, | |
| 1334 DoubleRegister value, | |
| 1335 Label* gc_required, | |
| 1336 Register scratch1, | |
| 1337 Register scratch2, | |
| 1338 Register heap_number_map = NoReg); | |
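| | |
| // A usage sketch (illustrative only): box the double in d0 as a new | |
| // HeapNumber in x0, letting the macro load the heap number map itself. | |
| // | |
| //   Label gc_required; | |
| //   masm->AllocateHeapNumberWithValue(x0, d0, &gc_required, x1, x2); | |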
| 1339 | |
| 1340 // --------------------------------------------------------------------------- | |
| 1341 // Support functions. | |
| 1342 | |
| 1343 // Try to get the function prototype of a function and put the value in the | |
| 1344 // result register. Checks that the function really is a function and jumps | |
| 1345 // to the miss label if the fast checks fail. The function register will be | |
| 1346 // untouched; the other registers may be clobbered. | |
| 1347 enum BoundFunctionAction { | |
| 1348 kMissOnBoundFunction, | |
| 1349 kDontMissOnBoundFunction | |
| 1350 }; | |
| 1351 | |
| 1352 void TryGetFunctionPrototype(Register function, | |
| 1353 Register result, | |
| 1354 Register scratch, | |
| 1355 Label* miss, | |
| 1356 BoundFunctionAction action = | |
| 1357 kDontMissOnBoundFunction); | |
| 1358 | |
| 1359 // Compare object type for heap object. heap_object contains a non-Smi | |
| 1360 // whose object type should be compared with the given type. This both | |
| 1361 // sets the flags and leaves the object type in the type_reg register. | |
| 1362 // It leaves the map in the map register (unless the type_reg and map register | |
| 1363 // are the same register). It leaves the heap object in the heap_object | |
| 1364 // register unless the heap_object register is the same register as one of the | |
| 1365 // other registers. | |
| 1366 void CompareObjectType(Register heap_object, | |
| 1367 Register map, | |
| 1368 Register type_reg, | |
| 1369 InstanceType type); | |
| 1370 | |
| 1371 | |
| 1372 // Compare object type for heap object, and branch if equal (or not). | |
| 1373 // heap_object contains a non-Smi whose object type should be compared with | |
| 1374 // the given type. This both sets the flags and leaves the object type in | |
| 1375 // the type_reg register. It leaves the map in the map register (unless the | |
| 1376 // type_reg and map register are the same register). It leaves the heap | |
| 1377 // object in the heap_object register unless the heap_object register is the | |
| 1378 // same register as one of the other registers. | |
| 1379 void JumpIfObjectType(Register object, | |
| 1380 Register map, | |
| 1381 Register type_reg, | |
| 1382 InstanceType type, | |
| 1383 Label* if_cond_pass, | |
| 1384 Condition cond = eq); | |
| 1385 | |
| 1386 void JumpIfNotObjectType(Register object, | |
| 1387 Register map, | |
| 1388 Register type_reg, | |
| 1389 InstanceType type, | |
| 1390 Label* if_not_object); | |
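| | |
| // A usage sketch (illustrative only): branch to 'not_function' unless x0 | |
| // holds a JSFunction, leaving its map in x1 and its instance type in x2. | |
| // | |
| //   Label not_function; | |
| //   masm->JumpIfNotObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &not_function); | |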
| 1391 | |
| 1392 // Compare instance type in a map. map contains a valid map object whose | |
| 1393 // object type should be compared with the given type. This both | |
| 1394 // sets the flags and leaves the object type in the type_reg register. | |
| 1395 void CompareInstanceType(Register map, | |
| 1396 Register type_reg, | |
| 1397 InstanceType type); | |
| 1398 | |
| 1399 // Compare an object's map with the specified map. Condition flags are set | |
| 1400 // with result of map compare. | |
| 1401 void CompareMap(Register obj, | |
| 1402 Register scratch, | |
| 1403 Handle<Map> map); | |
| 1404 | |
| 1405 // As above, but the map of the object is already loaded into the register | |
| 1406 // which is preserved by the code generated. | |
| 1407 void CompareMap(Register obj_map, | |
| 1408 Handle<Map> map); | |
| 1409 | |
| 1410 // Check if the map of an object is equal to a specified map and branch to | |
| 1411 // label if not. Skip the smi check if not required (object is known to be a | |
| 1412 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match | |
| 1413 // against maps that are ElementsKind transition maps of the specified map. | |
| 1414 void CheckMap(Register obj, | |
| 1415 Register scratch, | |
| 1416 Handle<Map> map, | |
| 1417 Label* fail, | |
| 1418 SmiCheckType smi_check_type); | |
| 1419 | |
| 1420 | |
| 1421 void CheckMap(Register obj, | |
| 1422 Register scratch, | |
| 1423 Heap::RootListIndex index, | |
| 1424 Label* fail, | |
| 1425 SmiCheckType smi_check_type); | |
| 1426 | |
| 1427 // As above, but the map of the object is already loaded into obj_map, and is | |
| 1428 // preserved. | |
| 1429 void CheckMap(Register obj_map, | |
| 1430 Handle<Map> map, | |
| 1431 Label* fail, | |
| 1432 SmiCheckType smi_check_type); | |
| 1433 | |
| 1434 // Check if the map of an object is equal to a specified map and branch to a | |
| 1435 // specified target if equal. Skip the smi check if not required (object is | |
| 1436 // known to be a heap object). | |
| 1437 void DispatchMap(Register obj, | |
| 1438 Register scratch, | |
| 1439 Handle<Map> map, | |
| 1440 Handle<Code> success, | |
| 1441 SmiCheckType smi_check_type); | |
| 1442 | |
| 1443 // Test the bitfield of the heap object map with mask and set the condition | |
| 1444 // flags. The object register is preserved. | |
| 1445 void TestMapBitfield(Register object, uint64_t mask); | |
| 1446 | |
| 1447 // Load the elements kind field of an object, and return it in the result | |
| 1448 // register. | |
| 1449 void LoadElementsKind(Register result, Register object); | |
| 1450 | |
| 1451 // Compare the object in a register to a value from the root list. | |
| 1452 // Uses the Tmp0() register as scratch. | |
| 1453 void CompareRoot(const Register& obj, Heap::RootListIndex index); | |
| 1454 | |
| 1455 // Compare the object in a register to a value and jump if they are equal. | |
| 1456 void JumpIfRoot(const Register& obj, | |
| 1457 Heap::RootListIndex index, | |
| 1458 Label* if_equal); | |
| 1459 | |
| 1460 // Compare the object in a register to a value and jump if they are not equal. | |
| 1461 void JumpIfNotRoot(const Register& obj, | |
| 1462 Heap::RootListIndex index, | |
| 1463 Label* if_not_equal); | |
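| | |
| // A usage sketch (illustrative only): jump when x0 holds the undefined value. | |
| // | |
| //   Label is_undefined; | |
| //   masm->JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined); | |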
| 1464 | |
| 1465 // Load and check the instance type of an object for being a unique name. | |
| 1466 // Loads the type into the second argument register. | |
| 1467 // The object and type arguments can be the same register; in that case it | |
| 1468 // will be overwritten with the type. | |
| 1469 // Falls through if the object is a unique name and jumps to fail otherwise. | |
| 1470 inline void IsObjectNameType(Register object, Register type, Label* fail); | |
| 1471 | |
| 1472 inline void IsObjectJSObjectType(Register heap_object, | |
| 1473 Register map, | |
| 1474 Register scratch, | |
| 1475 Label* fail); | |
| 1476 | |
| 1477 // Check the instance type in the given map to see if it corresponds to a | |
| 1478 // JS object type. Jump to the fail label if this is not the case and fall | |
| 1479 // through otherwise. However, if the fail label is NULL, no branch will be | |
| 1480 // performed and the condition flags will be updated instead. The flags can | |
| 1481 // then be tested for the 'le' condition to check for a valid JS object type. | |
| 1482 inline void IsInstanceJSObjectType(Register map, | |
| 1483 Register scratch, | |
| 1484 Label* fail); | |
| 1485 | |
| 1486 // Load and check the instance type of an object for being a string. | |
| 1487 // Loads the type into the second argument register. | |
| 1488 // The object and type arguments can be the same register; in that case it | |
| 1489 // will be overwritten with the type. | |
| 1490 // Jumps to not_string or string as appropriate. If the appropriate label is | |
| 1491 // NULL, fall through. | |
| 1492 inline void IsObjectJSStringType(Register object, Register type, | |
| 1493 Label* not_string, Label* string = NULL); | |
| 1494 | |
| 1495 // Compare the contents of a register with an operand, and branch to true, | |
| 1496 // false or fall through, depending on condition. | |
| 1497 void CompareAndSplit(const Register& lhs, | |
| 1498 const Operand& rhs, | |
| 1499 Condition cond, | |
| 1500 Label* if_true, | |
| 1501 Label* if_false, | |
| 1502 Label* fall_through); | |
| 1503 | |
| 1504 // Test the bits of the register defined by bit_pattern, and branch to | |
| 1505 // if_any_set, if_all_clear or fall_through accordingly. | |
| 1506 void TestAndSplit(const Register& reg, | |
| 1507 uint64_t bit_pattern, | |
| 1508 Label* if_all_clear, | |
| 1509 Label* if_any_set, | |
| 1510 Label* fall_through); | |
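| | |
| // A usage sketch (illustrative only; the labels are declared by the caller): | |
| // split on the string bit of an instance type held in w0. One target label | |
| // is typically bound immediately afterwards so that case falls through. | |
| // | |
| //   masm->TestAndSplit(w0, kIsNotStringMask, | |
| //                      &is_string,      // all bits clear | |
| //                      &is_not_string,  // any bit set | |
| //                      &fall_through); | |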
| 1511 | |
| 1512 // Check if a map for a JSObject indicates that the object has fast elements. | |
| 1513 // Jump to the specified label if it does not. | |
| 1514 void CheckFastElements(Register map, | |
| 1515 Register scratch, | |
| 1516 Label* fail); | |
| 1517 | |
| 1518 // Check if a map for a JSObject indicates that the object can have both smi | |
| 1519 // and HeapObject elements. Jump to the specified label if it does not. | |
| 1520 void CheckFastObjectElements(Register map, | |
| 1521 Register scratch, | |
| 1522 Label* fail); | |
| 1523 | |
| 1524 // Check if a map for a JSObject indicates that the object has fast smi only | |
| 1525 // elements. Jump to the specified label if it does not. | |
| 1526 void CheckFastSmiElements(Register map, Register scratch, Label* fail); | |
| 1527 | |
| 1528 // Check to see if number can be stored as a double in FastDoubleElements. | |
| 1529 // If it can, store it at the index specified by key_reg in the array, | |
| 1530 // otherwise jump to fail. | |
| 1531 void StoreNumberToDoubleElements(Register value_reg, | |
| 1532 Register key_reg, | |
| 1533 Register elements_reg, | |
| 1534 Register scratch1, | |
| 1535 FPRegister fpscratch1, | |
| 1536 FPRegister fpscratch2, | |
| 1537 Label* fail, | |
| 1538 int elements_offset = 0); | |
| 1539 | |
| 1540 // Picks out an array index from the hash field. | |
| 1541 // Register use: | |
| 1542 // hash - holds the index's hash. Clobbered. | |
| 1543 // index - holds the overwritten index on exit. | |
| 1544 void IndexFromHash(Register hash, Register index); | |
| 1545 | |
| 1546 // --------------------------------------------------------------------------- | |
| 1547 // Inline caching support. | |
| 1548 | |
| 1549 void EmitSeqStringSetCharCheck(Register string, | |
| 1550 Register index, | |
| 1551 SeqStringSetCharCheckIndexType index_type, | |
| 1552 Register scratch, | |
| 1553 uint32_t encoding_mask); | |
| 1554 | |
| 1555 // Generate code for checking access rights - used for security checks | |
| 1556 // on access to global objects across environments. The holder register | |
| 1557 // is left untouched, whereas both scratch registers are clobbered. | |
| 1558 void CheckAccessGlobalProxy(Register holder_reg, | |
| 1559 Register scratch, | |
| 1560 Label* miss); | |
| 1561 | |
| 1562 // Hash the integer value in the 'key' register. | |
| 1563 // It uses the same algorithm as ComputeIntegerHash in utils.h. | |
| 1564 void GetNumberHash(Register key, Register scratch); | |
| 1565 | |
| 1566 // Load value from the dictionary. | |
| 1567 // | |
| 1568 // elements - holds the slow-case elements of the receiver on entry. | |
| 1569 // Unchanged unless 'result' is the same register. | |
| 1570 // | |
| 1571 // key - holds the smi key on entry. | |
| 1572 // Unchanged unless 'result' is the same register. | |
| 1573 // | |
| 1574 // result - holds the result on exit if the load succeeded. | |
| 1575 // Allowed to be the same as 'key' or 'elements'. | |
| 1576 // Unchanged on bailout so 'key' or 'elements' can be used | |
| 1577 // in further computation. | |
| 1578 void LoadFromNumberDictionary(Label* miss, | |
| 1579 Register elements, | |
| 1580 Register key, | |
| 1581 Register result, | |
| 1582 Register scratch0, | |
| 1583 Register scratch1, | |
| 1584 Register scratch2, | |
| 1585 Register scratch3); | |
| 1586 | |
| 1587 // --------------------------------------------------------------------------- | |
| 1588 // Frames. | |
| 1589 | |
| 1590 // Activation support. | |
| 1591 // Note that Tmp0() and Tmp1() are used as scratch registers. This is safe | |
| 1592 // because these methods are not used in Crankshaft. | |
| 1593 void EnterFrame(StackFrame::Type type); | |
| 1594 void LeaveFrame(StackFrame::Type type); | |
| 1595 | |
| 1596 // Returns map with validated enum cache in object register. | |
| 1597 void CheckEnumCache(Register object, | |
| 1598 Register null_value, | |
| 1599 Register scratch0, | |
| 1600 Register scratch1, | |
| 1601 Register scratch2, | |
| 1602 Register scratch3, | |
| 1603 Label* call_runtime); | |
| 1604 | |
| 1605 // AllocationMemento support. Arrays may have an associated | |
| 1606 // AllocationMemento object that can be checked for in order to pretransition | |
| 1607 // to another type. | |
| 1608 // On entry, receiver should point to the array object. | |
| 1609 // If allocation info is present, the Z flag is set (so that the eq | |
| 1610 // condition will pass). | |
| 1611 void TestJSArrayForAllocationMemento(Register receiver, | |
| 1612 Register scratch1, | |
| 1613 Register scratch2, | |
| 1614 Label* no_memento_found); | |
| 1615 | |
| 1616 void JumpIfJSArrayHasAllocationMemento(Register receiver, | |
| 1617 Register scratch1, | |
| 1618 Register scratch2, | |
| 1619 Label* memento_found) { | |
| 1620 Label no_memento_found; | |
| 1621 TestJSArrayForAllocationMemento(receiver, scratch1, scratch2, | |
| 1622 &no_memento_found); | |
| 1623 B(eq, memento_found); | |
| 1624 Bind(&no_memento_found); | |
| 1625 } | |
| 1626 | |
| 1627 // The stack pointer has to switch between csp and jssp when setting up and | |
| 1628 // destroying the exit frame. Hence preserving/restoring the registers is | |
| 1629 // slightly more complicated than simple push/pop operations. | |
| 1630 void ExitFramePreserveFPRegs(); | |
| 1631 void ExitFrameRestoreFPRegs(); | |
| 1632 | |
| 1633 // Generates function and stub prologue code. | |
| 1634 void Prologue(PrologueFrameMode frame_mode); | |
| 1635 | |
| 1636 // Enter exit frame. Exit frames are used when calling C code from generated | |
| 1637 // (JavaScript) code. | |
| 1638 // | |
| 1639 // The stack pointer must be jssp on entry, and will be set to csp by this | |
| 1640 // function. The frame pointer is also configured, but the only other | |
| 1641 // registers modified by this function are the provided scratch register, and | |
| 1642 // jssp. | |
| 1643 // | |
| 1644 // The 'extra_space' argument can be used to allocate some space in the exit | |
| 1645 // frame that will be ignored by the GC. This space will be reserved in the | |
| 1646 // bottom of the frame immediately above the return address slot. | |
| 1647 // | |
| 1648 // Set up a stack frame and registers as follows: | |
| 1649 // fp[8]: CallerPC (lr) | |
| 1650 // fp -> fp[0]: CallerFP (old fp) | |
| 1651 // fp[-8]: SPOffset (new csp) | |
| 1652 // fp[-16]: CodeObject() | |
| 1653 // fp[-16 - fp-size]: Saved doubles, if save_doubles is true. | |
| 1654 // csp[8]: Memory reserved for the caller if extra_space != 0. | |
| 1655 // Alignment padding, if necessary. | |
| 1656 // csp -> csp[0]: Space reserved for the return address. | |
| 1657 // | |
| 1658 // This function also stores the new frame information in the top frame, so | |
| 1659 // that the new frame becomes the current frame. | |
| 1660 void EnterExitFrame(bool save_doubles, | |
| 1661 const Register& scratch, | |
| 1662 int extra_space = 0); | |
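| | |
| // A usage sketch (illustrative only; the scratch register is arbitrary): | |
| // bracket a call out to C code with an exit frame. | |
| // | |
| //   masm->EnterExitFrame(false, x10); | |
| //   // ... set up arguments and call the C function ... | |
| //   masm->LeaveExitFrame(false, x10, true); | |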
| 1663 | |
| 1664 // Leave the current exit frame, after a C function has returned to generated | |
| 1665 // (JavaScript) code. | |
| 1666 // | |
| 1667 // This effectively unwinds the operation of EnterExitFrame: | |
| 1668 // * Preserved doubles are restored (if save_doubles is true). | |
| 1669 // * The frame information is removed from the top frame. | |
| 1670 // * The exit frame is dropped. | |
| 1671 // * The stack pointer is reset to jssp. | |
| 1672 // | |
| 1673 // The stack pointer must be csp on entry. | |
| 1674 void LeaveExitFrame(bool save_doubles, | |
| 1675 const Register& scratch, | |
| 1676 bool restore_context); | |
| 1677 | |
| 1678 void LoadContext(Register dst, int context_chain_length); | |
| 1679 | |
| 1680 // --------------------------------------------------------------------------- | |
| 1681 // StatsCounter support | |
| 1682 | |
| 1683 void SetCounter(StatsCounter* counter, int value, Register scratch1, | |
| 1684 Register scratch2); | |
| 1685 void IncrementCounter(StatsCounter* counter, int value, Register scratch1, | |
| 1686 Register scratch2); | |
| 1687 void DecrementCounter(StatsCounter* counter, int value, Register scratch1, | |
| 1688 Register scratch2); | |
| 1689 | |
| 1690 // --------------------------------------------------------------------------- | |
| 1691 // Garbage collector support (GC). | |
| 1692 | |
| 1693 enum RememberedSetFinalAction { | |
| 1694 kReturnAtEnd, | |
| 1695 kFallThroughAtEnd | |
| 1696 }; | |
| 1697 | |
| 1698 // Record in the remembered set the fact that we have a pointer to new space | |
| 1699 // at the address pointed to by the addr register. Only works if addr is not | |
| 1700 // in new space. | |
| 1701 void RememberedSetHelper(Register object, // Used for debug code. | |
| 1702 Register addr, | |
| 1703 Register scratch, | |
| 1704 SaveFPRegsMode save_fp, | |
| 1705 RememberedSetFinalAction and_then); | |
| 1706 | |
| 1707 // Push and pop the registers that can hold pointers, as defined by the | |
| 1708 // RegList constant kSafepointSavedRegisters. | |
| 1709 void PushSafepointRegisters(); | |
| 1710 void PopSafepointRegisters(); | |
| 1711 | |
| 1712 void PushSafepointFPRegisters(); | |
| 1713 void PopSafepointFPRegisters(); | |
| 1714 | |
| 1715 // Store value in register src in the safepoint stack slot for register dst. | |
| 1716 void StoreToSafepointRegisterSlot(Register src, Register dst) { | |
| 1717 Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize); | |
| 1718 } | |
| 1719 | |
| 1720 // Load the value of the src register from its safepoint stack slot | |
| 1721 // into register dst. | |
| 1722 void LoadFromSafepointRegisterSlot(Register dst, Register src) { | |
| 1723 Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize); | |
| 1724 } | |
| 1725 | |
| 1726 void CheckPageFlagSet(const Register& object, | |
| 1727 const Register& scratch, | |
| 1728 int mask, | |
| 1729 Label* if_any_set); | |
| 1730 | |
| 1731 void CheckPageFlagClear(const Register& object, | |
| 1732 const Register& scratch, | |
| 1733 int mask, | |
| 1734 Label* if_all_clear); | |
| 1735 | |
| 1736 void CheckMapDeprecated(Handle<Map> map, | |
| 1737 Register scratch, | |
| 1738 Label* if_deprecated); | |
| 1739 | |
| 1740 // Check if object is in new space and jump accordingly. | |
| 1741 // Register 'object' is preserved. | |
| 1742 void JumpIfNotInNewSpace(Register object, | |
| 1743 Label* branch) { | |
| 1744 InNewSpace(object, ne, branch); | |
| 1745 } | |
| 1746 | |
| 1747 void JumpIfInNewSpace(Register object, | |
| 1748 Label* branch) { | |
| 1749 InNewSpace(object, eq, branch); | |
| 1750 } | |
| 1751 | |
| 1752 // Notify the garbage collector that we wrote a pointer into an object. | |
| 1753 // |object| is the object being stored into, |value| is the object being | |
| 1754 // stored. value and scratch registers are clobbered by the operation. | |
| 1755 // The offset is the offset from the start of the object, not the offset from | |
| 1756 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off). | |
| 1757 void RecordWriteField( | |
| 1758 Register object, | |
| 1759 int offset, | |
| 1760 Register value, | |
| 1761 Register scratch, | |
| 1762 LinkRegisterStatus lr_status, | |
| 1763 SaveFPRegsMode save_fp, | |
| 1764 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, | |
| 1765 SmiCheck smi_check = INLINE_SMI_CHECK); | |
| 1766 | |
| 1767 // As above, but the offset has the tag presubtracted. For use with | |
| 1768 // MemOperand(reg, off). | |
| 1769 inline void RecordWriteContextSlot( | |
| 1770 Register context, | |
| 1771 int offset, | |
| 1772 Register value, | |
| 1773 Register scratch, | |
| 1774 LinkRegisterStatus lr_status, | |
| 1775 SaveFPRegsMode save_fp, | |
| 1776 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, | |
| 1777 SmiCheck smi_check = INLINE_SMI_CHECK) { | |
| 1778 RecordWriteField(context, | |
| 1779 offset + kHeapObjectTag, | |
| 1780 value, | |
| 1781 scratch, | |
| 1782 lr_status, | |
| 1783 save_fp, | |
| 1784 remembered_set_action, | |
| 1785 smi_check); | |
| 1786 } | |
| 1787 | |
| 1788 // For a given |object| notify the garbage collector that the slot |address| | |
| 1789 // has been written. |value| is the object being stored. The value and | |
| 1790 // address registers are clobbered by the operation. | |
| 1791 void RecordWrite( | |
| 1792 Register object, | |
| 1793 Register address, | |
| 1794 Register value, | |
| 1795 LinkRegisterStatus lr_status, | |
| 1796 SaveFPRegsMode save_fp, | |
| 1797 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, | |
| 1798 SmiCheck smi_check = INLINE_SMI_CHECK); | |
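| | |
| // A usage sketch (illustrative only): emit the write barrier after storing | |
| // 'value' into the slot at 'address' inside 'object'. Both 'address' and | |
| // 'value' are clobbered. | |
| // | |
| //   masm->Str(value, MemOperand(address)); | |
| //   masm->RecordWrite(object, address, value, kLRHasNotBeenSaved, | |
| //                     kSaveFPRegs); | |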
| 1799 | |
| 1800 // Checks the color of an object. If the object is already grey or black | |
| 1801 // then we just fall through, since it is already live. If it is white and | |
| 1802 // we can determine that it doesn't need to be scanned, then we just mark it | |
| 1803 // black and fall through. For the rest we jump to the label so the | |
| 1804 // incremental marker can fix its assumptions. | |
| 1805 void EnsureNotWhite(Register object, | |
| 1806 Register scratch1, | |
| 1807 Register scratch2, | |
| 1808 Register scratch3, | |
| 1809 Register scratch4, | |
| 1810 Label* object_is_white_and_not_data); | |
| 1811 | |
| 1812 // Detects conservatively whether an object is data-only, i.e. it does not | |
| 1813 // need to be scanned by the garbage collector. | |
| 1814 void JumpIfDataObject(Register value, | |
| 1815 Register scratch, | |
| 1816 Label* not_data_object); | |
| 1817 | |
| 1818 // Helper for finding the mark bits for an address. | |
| 1819 // Note that the behaviour differs slightly from other architectures. | |
| 1820 // On exit: | |
| 1821 // - addr_reg is unchanged. | |
| 1822 // - The bitmap register points at the word with the mark bits. | |
| 1823 // - The shift register contains the index of the first color bit for this | |
| 1824 // object in the bitmap. | |
| 1825 inline void GetMarkBits(Register addr_reg, | |
| 1826 Register bitmap_reg, | |
| 1827 Register shift_reg); | |
| 1828 | |
| 1829 // Check if an object has a given incremental marking color. | |
| 1830 void HasColor(Register object, | |
| 1831 Register scratch0, | |
| 1832 Register scratch1, | |
| 1833 Label* has_color, | |
| 1834 int first_bit, | |
| 1835 int second_bit); | |
| 1836 | |
| 1837 void JumpIfBlack(Register object, | |
| 1838 Register scratch0, | |
| 1839 Register scratch1, | |
| 1840 Label* on_black); | |
| 1841 | |
| 1842 | |
| 1843 // Get the location of a relocated constant (its address in the constant pool) | |
| 1844 // from its load site. | |
| 1845 void GetRelocatedValueLocation(Register ldr_location, | |
| 1846 Register result); | |
| 1847 | |
| 1848 | |
| 1849 // --------------------------------------------------------------------------- | |
| 1850 // Debugging. | |
| 1851 | |
| 1852 // Calls Abort(msg) if the condition cond is not satisfied. | |
| 1853 // Use --debug_code to enable. | |
| 1854 void Assert(Condition cond, BailoutReason reason); | |
| 1855 void AssertRegisterIsClear(Register reg, BailoutReason reason); | |
| 1856 void AssertRegisterIsRoot( | |
| 1857 Register reg, | |
| 1858 Heap::RootListIndex index, | |
| 1859 BailoutReason reason = kRegisterDidNotMatchExpectedRoot); | |
| 1860 void AssertFastElements(Register elements); | |
| 1861 | |
| 1862 // Abort if the specified register contains the invalid color bit pattern. | |
| 1863 // The pattern must be in bits [1:0] of 'reg' register. | |
| 1864 // | |
| 1865 // If emit_debug_code() is false, this emits no code. | |
| 1866 void AssertHasValidColor(const Register& reg); | |
| 1867 | |
| 1868 // Abort if 'object' register doesn't point to a string object. | |
| 1869 // | |
| 1870 // If emit_debug_code() is false, this emits no code. | |
| 1871 void AssertIsString(const Register& object); | |
| 1872 | |
| 1873 // Like Assert(), but always enabled. | |
| 1874 void Check(Condition cond, BailoutReason reason); | |
| 1875 void CheckRegisterIsClear(Register reg, BailoutReason reason); | |
| 1876 | |
| 1877 // Print a message to stderr and abort execution. | |
| 1878 void Abort(BailoutReason reason); | |
| 1879 | |
| 1880 // Conditionally load the cached Array transitioned map of type | |
| 1881 // transitioned_kind from the native context if the map in register | |
| 1882 // map_in_out is the cached Array map in the native context of | |
| 1883 // expected_kind. | |
| 1884 void LoadTransitionedArrayMapConditional( | |
| 1885 ElementsKind expected_kind, | |
| 1886 ElementsKind transitioned_kind, | |
| 1887 Register map_in_out, | |
| 1888 Register scratch, | |
| 1889 Label* no_map_match); | |
| 1890 | |
| 1891 void LoadArrayFunction(Register function); | |
| 1892 void LoadGlobalFunction(int index, Register function); | |
| 1893 | |
| 1894 // Load the initial map from the global function. The registers function and | |
| 1895 // map can be the same; function is then overwritten. | |
| 1896 void LoadGlobalFunctionInitialMap(Register function, | |
| 1897 Register map, | |
| 1898 Register scratch); | |
| 1899 | |
| 1900 // -------------------------------------------------------------------------- | |
| 1901 // Set the registers used internally by the MacroAssembler as scratch | |
| 1902 // registers. These registers are used to implement behaviours which are not | |
| 1903 // directly supported by A64, and where an intermediate result is required. | |
| 1904 // | |
| 1905 // Both tmp0 and tmp1 may be set to any X register except for xzr, csp, | |
| 1906 // and StackPointer(). Also, they must not be the same register (though they | |
| 1907 // may both be NoReg). | |
| 1908 // | |
| 1909 // It is valid to set either or both of these registers to NoReg if you don't | |
| 1910 // want the MacroAssembler to use any scratch registers. In a debug build, the | |
| 1911 // Assembler will assert that any registers it uses are valid. Be aware that | |
| 1912 // this check is not present in release builds. If this is a problem, use the | |
| 1913 // Assembler directly. | |
| 1914 void SetScratchRegisters(const Register& tmp0, const Register& tmp1) { | |
| 1915 // V8 assumes the macro assembler uses ip0 and ip1 as temp registers. | |
| 1916 ASSERT(tmp0.IsNone() || tmp0.Is(ip0)); | |
| 1917 ASSERT(tmp1.IsNone() || tmp1.Is(ip1)); | |
| 1918 | |
| 1919 ASSERT(!AreAliased(xzr, csp, tmp0, tmp1)); | |
| 1920 ASSERT(!AreAliased(StackPointer(), tmp0, tmp1)); | |
| 1921 tmp0_ = tmp0; | |
| 1922 tmp1_ = tmp1; | |
| 1923 } | |
| 1924 | |
| 1925 const Register& Tmp0() const { | |
| 1926 return tmp0_; | |
| 1927 } | |
| 1928 | |
| 1929 const Register& Tmp1() const { | |
| 1930 return tmp1_; | |
| 1931 } | |
| 1932 | |
| 1933 const Register WTmp0() const { | |
| 1934 return Register::Create(tmp0_.code(), kWRegSize); | |
| 1935 } | |
| 1936 | |
| 1937 const Register WTmp1() const { | |
| 1938 return Register::Create(tmp1_.code(), kWRegSize); | |
| 1939 } | |
| 1940 | |
| 1941 void SetFPScratchRegister(const FPRegister& fptmp0) { | |
| 1942 fptmp0_ = fptmp0; | |
| 1943 } | |
| 1944 | |
| 1945 const FPRegister& FPTmp0() const { | |
| 1946 return fptmp0_; | |
| 1947 } | |
| 1948 | |
| 1949 const Register AppropriateTempFor( | |
| 1950 const Register& target, | |
| 1951 const CPURegister& forbidden = NoCPUReg) const { | |
| 1952 Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0(); | |
| 1953 ASSERT(!candidate.Is(target)); | |
| 1954 return Register::Create(candidate.code(), target.SizeInBits()); | |
| 1955 } | |
| 1956 | |
| 1957 const FPRegister AppropriateTempFor( | |
| 1958 const FPRegister& target, | |
| 1959 const CPURegister& forbidden = NoCPUReg) const { | |
| 1960 USE(forbidden); | |
| 1961 FPRegister candidate = FPTmp0(); | |
| 1962 ASSERT(!candidate.Is(forbidden)); | |
| 1963 ASSERT(!candidate.Is(target)); | |
| 1964 return FPRegister::Create(candidate.code(), target.SizeInBits()); | |
| 1965 } | |
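| | |
| // A usage sketch (illustrative only): pick whichever of Tmp0() and Tmp1() | |
| // is not already in use, sized to match the target register. | |
| // | |
| //   Register temp = masm->AppropriateTempFor(w0, masm->Tmp0()); | |
| //   // 'temp' is now the W view of Tmp1(), safe to use alongside Tmp0(). | |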
| 1966 | |
| 1967 // Like printf, but print at run-time from generated code. | |
| 1968 // | |
| 1969 // The caller must ensure that arguments for floating-point placeholders | |
| 1970 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer | |
| 1971 // placeholders are Registers. | |
| 1972 // | |
| 1973 // A maximum of four arguments may be given to any single Printf call. The | |
| 1974 // arguments must be of the same type, but they do not need to have the same | |
| 1975 // size. | |
| 1976 // | |
| 1977 // The following registers cannot be printed: | |
| 1978 // Tmp0(), Tmp1(), StackPointer(), csp. | |
| 1979 // | |
| 1980 // This function automatically preserves caller-saved registers so that | |
| 1981 // calling code can use Printf at any point without having to worry about | |
| 1982 // corruption. The preservation mechanism generates a lot of code. If this is | |
| 1983 // a problem, preserve the important registers manually and then call | |
| 1984 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are | |
| 1985 // implicitly preserved. | |
| 1986 // | |
| 1987 // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be | |
| 1988 // preserved, and can be printed. This allows Printf to be used in debug | |
| 1989 // code. | |
| 1990 // | |
| 1991 // This function assumes (and asserts) that the current stack pointer is | |
| 1992 // callee-saved, not caller-saved. This is most likely the case anyway, as a | |
| 1993 // caller-saved stack pointer doesn't make a lot of sense. | |
| 1994 void Printf(const char * format, | |
| 1995 const CPURegister& arg0 = NoCPUReg, | |
| 1996 const CPURegister& arg1 = NoCPUReg, | |
| 1997 const CPURegister& arg2 = NoCPUReg, | |
| 1998 const CPURegister& arg3 = NoCPUReg); | |
| 1999 | |
| 2000 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'. | |
| 2001 // | |
| 2002 // The return code from the system printf call will be returned in x0. | |
| 2003 void PrintfNoPreserve(const char * format, | |
| 2004 const CPURegister& arg0 = NoCPUReg, | |
| 2005 const CPURegister& arg1 = NoCPUReg, | |
| 2006 const CPURegister& arg2 = NoCPUReg, | |
| 2007 const CPURegister& arg3 = NoCPUReg); | |
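| | |
| // A usage sketch (illustrative only): print two integer registers. Note | |
| // that all arguments to a single call must be of the same type (here, | |
| // Registers), though their sizes may differ. | |
| // | |
| //   masm->Printf("x0: %" PRId64 ", w1: %" PRId32 "\n", x0, w1); | |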
| 2008 | |
| 2009 // Code ageing support functions. | |
| 2010 | |
| 2011 // Code ageing on A64 works as it does on ARM. When V8 wants to mark a | |
| 2012 // function as old, it replaces some of the function prologue (generated by | |
| 2013 // FullCodeGenerator::Generate) with a call to a special stub (ultimately | |
| 2014 // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the | |
| 2015 // function prologue to its initial young state (indicating that it has been | |
| 2016 // recently run) and continues. A young function is therefore one which has a | |
| 2017 // normal frame setup sequence, and an old function has a code age sequence | |
| 2018 // which calls a code ageing stub. | |
| 2019 | |
| 2020 // Set up a basic stack frame for young code (or code exempt from ageing) with | |
| 2021 // type FUNCTION. It may be patched later for code ageing support. This is | |
| 2022 // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence. | |
| 2023 // | |
| 2024 // This function takes an Assembler so it can be called from either a | |
| 2025 // MacroAssembler or a PatchingAssembler context. | |
| 2026 static void EmitFrameSetupForCodeAgePatching(Assembler* assm); | |
| 2027 | |
| 2028 // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context. | |
| 2029 void EmitFrameSetupForCodeAgePatching(); | |
| 2030 | |
| 2031 // Emit a code age sequence that calls the relevant code age stub. The code | |
| 2032 // generated by this sequence is expected to replace the code generated by | |
| 2033 // EmitFrameSetupForCodeAgePatching, and represents an old function. | |
| 2034 // | |
| 2035 // If stub is NULL, this function generates the code age sequence but omits | |
| 2036 // the stub address that is normally embedded in the instruction stream. This | |
| 2037 // can be used by debug code to verify code age sequences. | |
| 2038 static void EmitCodeAgeSequence(Assembler* assm, Code* stub); | |
| 2039 | |
| 2040 // Call EmitCodeAgeSequence from a MacroAssembler context. | |
| 2041 void EmitCodeAgeSequence(Code* stub); | |
| 2042 | |
| 2043 // Return true if the sequence is a young sequence generated by | |
| 2044 // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the | |
| 2045 // sequence is a code age sequence (emitted by EmitCodeAgeSequence). | |
| 2046 static bool IsYoungSequence(byte* sequence); | |
| 2047 | |
| 2048 #ifdef DEBUG | |
| 2049 // Return true if the sequence is a code age sequence generated by | |
| 2050 // EmitCodeAgeSequence. | |
| 2051 static bool IsCodeAgeSequence(byte* sequence); | |
| 2052 #endif | |
| 2053 | |
| 2054 // Jumps to the 'found' label if a prototype map has dictionary elements. | |
| 2055 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, | |
| 2056 Register scratch1, Label* found); | |
| 2057 | |
| 2058 private: | |
| 2059 // Helpers for CopyFields. | |
| 2060 // These each implement CopyFields in a different way. | |
| 2061 void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count, | |
| 2062 Register scratch1, Register scratch2, | |
| 2063 Register scratch3); | |
| 2064 void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count, | |
| 2065 Register scratch1, Register scratch2); | |
| 2066 void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count, | |
| 2067 Register scratch1); | |
| 2068 | |
| 2069 // The actual Push and Pop implementations. These don't generate any code | |
| 2070 // other than that required for the push or pop. This allows | |
| 2071 // (Push|Pop)CPURegList to bundle together run-time assertions for a large | |
| 2072 // block of registers. | |
| 2073 // | |
| 2074 // Note that size is per register, and is specified in bytes. | |
| 2075 void PushHelper(int count, int size, | |
| 2076 const CPURegister& src0, const CPURegister& src1, | |
| 2077 const CPURegister& src2, const CPURegister& src3); | |
| 2078 void PopHelper(int count, int size, | |
| 2079 const CPURegister& dst0, const CPURegister& dst1, | |
| 2080 const CPURegister& dst2, const CPURegister& dst3); | |
| 2081 | |
| 2082 // Perform necessary maintenance operations before a push or pop. | |
| 2083 // | |
| 2084 // Note that size is specified in bytes. | |
| 2085 void PrepareForPush(Operand total_size); | |
| 2086 void PrepareForPop(Operand total_size); | |
| 2087 | |
| 2088 void PrepareForPush(int count, int size) { PrepareForPush(count * size); } | |
| 2089 void PrepareForPop(int count, int size) { PrepareForPop(count * size); } | |
| 2090 | |
| 2091 // Call Printf. On a native build, a simple call will be generated, but if the | |
| 2092 // simulator is being used then a suitable pseudo-instruction is used. The | |
| 2093 // arguments and stack (csp) must be prepared by the caller as for a normal | |
| 2094 // AAPCS64 call to 'printf'. | |
| 2095 // | |
| 2096 // The 'type' argument specifies the type of the optional arguments. | |
| 2097 void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister); | |
| 2098 | |
| 2099 // Helper for throwing exceptions. Compute a handler address and jump to | |
| 2100 // it. See the implementation for register usage. | |
| 2101 void JumpToHandlerEntry(Register exception, | |
| 2102 Register object, | |
| 2103 Register state, | |
| 2104 Register scratch1, | |
| 2105 Register scratch2); | |
| 2106 | |
| 2107 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. | |
| 2108 void InNewSpace(Register object, | |
| 2109 Condition cond, // eq for new space, ne otherwise. | |
| 2110 Label* branch); | |
| 2111 | |
| 2112 // Try to convert a double to an int so that integer fast-paths may be | |
| 2113 // used. Not every valid integer value is guaranteed to be caught. | |
| 2114 // It supports both 32-bit and 64-bit integers, depending on whether 'as_int' | |
| 2115 // is a W or X register. | |
| 2116 // | |
| 2117 // This does not distinguish between +0 and -0, so if this distinction is | |
| 2118 // important it must be checked separately. | |
| 2119 void TryConvertDoubleToInt(Register as_int, | |
| 2120 FPRegister value, | |
| 2121 FPRegister scratch_d, | |
| 2122 Label* on_successful_conversion, | |
| 2123 Label* on_failed_conversion = NULL); | |
| 2124 | |
| 2125 bool generating_stub_; | |
| 2126 #ifdef DEBUG | |
| 2127 // Tell whether any of the macro instructions can be used. When false the | |
| 2128 // MacroAssembler will assert if a method which can emit a variable number | |
| 2129 // of instructions is called. | |
| 2130 bool allow_macro_instructions_; | |
| 2131 #endif | |
| 2132 bool has_frame_; | |
| 2133 | |
| 2134 // The Abort method should call a V8 runtime function, but the CallRuntime | |
| 2135 // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will | |
| 2136 // use a simpler abort mechanism that doesn't depend on CEntryStub. | |
| 2137 // | |
| 2138 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is | |
| 2139 // being generated. | |
| 2140 bool use_real_aborts_; | |
| 2141 | |
| 2142 // This handle will be patched with the code object on installation. | |
| 2143 Handle<Object> code_object_; | |
| 2144 | |
| 2145 // The register to use as a stack pointer for stack operations. | |
| 2146 Register sp_; | |
| 2147 | |
| 2148 // Scratch registers used internally by the MacroAssembler. | |
| 2149 Register tmp0_; | |
| 2150 Register tmp1_; | |
| 2151 FPRegister fptmp0_; | |
| 2152 | |
| 2153 void InitializeNewString(Register string, | |
| 2154 Register length, | |
| 2155 Heap::RootListIndex map_index, | |
| 2156 Register scratch1, | |
| 2157 Register scratch2); | |
| 2158 | |
| 2159 public: | |
| 2160 // Far branches resolving. | |
| 2161 // | |
| 2162 // The various classes of branch instructions with immediate offsets have | |
| 2163 // different ranges. While the Assembler will fail to assemble a branch | |
| 2164 // exceeding its range, the MacroAssembler offers a mechanism to resolve | |
| 2165 // branches to too distant targets, either by tweaking the generated code to | |
| 2166 // use branch instructions with wider ranges or generating veneers. | |
| 2167 // | |
| 2168 // Currently branches to distant targets are resolved using unconditional | |
| 2169 // branch instructions with a range of +-128MB. If that becomes too little | |
| 2170 // (!), the mechanism can be extended to generate special veneers for really | |
| 2171 // far targets. | |
| 2172 | |
| 2173 // Returns true if we should emit a veneer as soon as possible for a branch | |
| 2174 // which can at most reach the specified pc. | |
| 2175 bool ShouldEmitVeneer(int max_reachable_pc, | |
| 2176 int margin = kVeneerDistanceMargin); | |
| 2177 | |
| 2178 // The maximum code size generated for a veneer. Currently one branch | |
| 2179 // instruction. This is for code size checking purposes, and can be extended | |
| 2180 // in the future for example if we decide to add nops between the veneers. | |
| 2181 static const int kMaxVeneerCodeSize = 1 * kInstructionSize; | |
| 2182 | |
| 2183 // Emits veneers for branches that are approaching their maximum range. | |
| 2184 // If need_protection is true, the veneers are protected by a branch jumping | |
| 2185 // over the code. | |
| 2186 void EmitVeneers(bool need_protection); | |
| 2187 void EmitVeneersGuard(); | |
| 2188 // Checks whether veneers need to be emitted at this point. | |
| 2189 void CheckVeneers(bool need_protection); | |
| 2190 | |
| 2191 // Helps resolve branching to labels potentially out of range. | |
| 2192 // If the label is not bound, it registers the information necessary to later | |
| 2193 // be able to emit a veneer for this branch if necessary. | |
| 2194 // If the label is bound, it returns true if the label (or the previous link | |
| 2195 // in the label chain) is out of range. In that case the caller is responsible | |
| 2196 // for generating appropriate code. | |
| 2197 // Otherwise it returns false. | |
| 2198 // This function also checks whether veneers need to be emitted. | |
| 2199 bool NeedExtraInstructionsOrRegisterBranch(Label *label, | |
| 2200 ImmBranchType branch_type); | |
| 2201 | |
| 2202 private: | |
| 2203 // We generate a veneer for a branch if we reach within this distance of the | |
| 2204 // limit of the range. | |
| 2205 static const int kVeneerDistanceMargin = 4 * KB; | |
| 2206 int unresolved_branches_first_limit() const { | |
| 2207 ASSERT(!unresolved_branches_.empty()); | |
| 2208 return unresolved_branches_.begin()->first; | |
| 2209 } | |
| 2210 }; | |
| 2211 | |
| 2212 | |
| 2213 // Use this scope when you need a one-to-one mapping between methods and | |
| 2214 // instructions. This scope prevents the MacroAssembler from being called and | |
| 2215 // literal pools from being emitted. It also asserts the number of instructions | |
| 2216 // emitted is what you specified when creating the scope. | |
| 2217 class InstructionAccurateScope BASE_EMBEDDED { | |
| 2218 public: | |
| 2219 InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) | |
| 2220 : masm_(masm) | |
| 2221 #ifdef DEBUG | |
| 2222 , | |
| 2223 size_(count * kInstructionSize) | |
| 2224 #endif | |
| 2225 { | |
| 2226 // Before blocking the const pool, see if it needs to be emitted. | |
| 2227 masm_->CheckConstPool(false, true); | |
| 2228 | |
| 2229 masm_->StartBlockConstPool(); | |
| 2230 #ifdef DEBUG | |
| 2231 if (count != 0) { | |
| 2232 masm_->bind(&start_); | |
| 2233 } | |
| 2234 previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); | |
| 2235 masm_->set_allow_macro_instructions(false); | |
| 2236 #endif | |
| 2237 } | |
| 2238 | |
| 2239 ~InstructionAccurateScope() { | |
| 2240 masm_->EndBlockConstPool(); | |
| 2241 #ifdef DEBUG | |
| 2242 if (start_.is_bound()) { | |
| 2243 ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_); | |
| 2244 } | |
| 2245 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); | |
| 2246 #endif | |
| 2247 } | |
| 2248 | |
| 2249 private: | |
| 2250 MacroAssembler* masm_; | |
| 2251 #ifdef DEBUG | |
| 2252 size_t size_; | |
| 2253 Label start_; | |
| 2254 bool previous_allow_macro_instructions_; | |
| 2255 #endif | |
| 2256 }; | |
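| | |
| // A usage sketch (illustrative only): emit exactly two raw assembler | |
| // instructions with the constant pool blocked; debug builds assert the count. | |
| // | |
| //   { | |
| //     InstructionAccurateScope scope(masm, 2); | |
| //     masm->ldr(x0, MemOperand(x1));  // Raw instructions only, no macros. | |
| //     masm->add(x0, x0, Operand(1)); | |
| //   } | |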
| 2257 | |
| 2258 | |
| 2259 inline MemOperand ContextMemOperand(Register context, int index) { | |
| 2260 return MemOperand(context, Context::SlotOffset(index)); | |
| 2261 } | |
| 2262 | |
| 2263 inline MemOperand GlobalObjectMemOperand() { | |
| 2264 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX); | |
| 2265 } | |
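| | |
| // A usage sketch (illustrative only): load the global object from the | |
| // current context, which is expected to be in the cp register. | |
| // | |
| //   masm->Ldr(x0, GlobalObjectMemOperand()); | |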
| 2266 | |
| 2267 | |
| 2268 // Encode and decode information about patchable inline SMI checks. | |
| 2269 class InlineSmiCheckInfo { | |
| 2270 public: | |
| 2271 explicit InlineSmiCheckInfo(Address info); | |
| 2272 | |
| 2273 bool HasSmiCheck() const { | |
| 2274 return smi_check_ != NULL; | |
| 2275 } | |
| 2276 | |
| 2277 const Register& SmiRegister() const { | |
| 2278 return reg_; | |
| 2279 } | |
| 2280 | |
| 2281 Instruction* SmiCheck() const { | |
| 2282 return smi_check_; | |
| 2283 } | |
| 2284 | |
| 2285 // Use MacroAssembler::InlineData to emit information about patchable inline | |
| 2286 // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to | |
| 2287 // indicate that there is no inline SMI check. Note that 'reg' cannot be csp. | |
| 2288 // | |
| 2289 // The generated patch information can be read using the InlineSMICheckInfo | |
| 2290 // class. | |
| 2291 static void Emit(MacroAssembler* masm, const Register& reg, | |
| 2292 const Label* smi_check); | |
| 2293 | |
| 2294 // Emit information to indicate that there is no inline SMI check. | |
| 2295 static void EmitNotInlined(MacroAssembler* masm) { | |
| 2296 Label unbound; | |
| 2297 Emit(masm, NoReg, &unbound); | |
| 2298 } | |
| 2299 | |
| 2300 private: | |
| 2301 Register reg_; | |
| 2302 Instruction* smi_check_; | |
| 2303 | |
| 2304 // Fields in the data encoded by InlineData. | |
| 2305 | |
| 2306 // A width of 5 (Rd_width) for the SMI register precludes the use of csp, | |
| 2307 // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be | |
| 2308 // used in a patchable check. The Emit() method checks this. | |
| 2309 // | |
| 2310 // Note that the total size of the fields is restricted by the underlying | |
| 2311 // storage size handled by the BitField class, which is a uint32_t. | |
| 2312 class RegisterBits : public BitField<unsigned, 0, 5> {}; | |
| 2313 class DeltaBits : public BitField<uint32_t, 5, 32-5> {}; | |
| 2314 }; | |
| 2315 | |
| 2316 } } // namespace v8::internal | |
| 2317 | |
| 2318 #ifdef GENERATED_CODE_COVERAGE | |
| 2319 #error "Unsupported option" | |
| 2320 #define CODE_COVERAGE_STRINGIFY(x) #x | |
| 2321 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | |
| 2322 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | |
| 2323 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | |
| 2324 #else | |
| 2325 #define ACCESS_MASM(masm) masm-> | |
| 2326 #endif | |
| 2327 | |
| 2328 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_ | |