| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #if defined(V8_TARGET_ARCH_A64) |
| 31 |
| 32 #include "bootstrapper.h" |
| 33 #include "codegen.h" |
| 34 #include "debug.h" |
| 35 #include "runtime.h" |
| 36 |
| 37 namespace v8 { |
| 38 namespace internal { |
| 39 |
| 40 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros. |
| 41 #define __ |
| 42 |
| 43 |
| 44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, |
| 45 byte * buffer, |
| 46 unsigned buffer_size) |
| 47 : Assembler(arg_isolate, buffer, buffer_size), |
| 48 generating_stub_(false), |
| 49 allow_stub_calls_(true), |
| 50 #if DEBUG |
| 51 allow_macro_instructions_(true), |
| 52 #endif |
| 53 has_frame_(false), |
| 54 use_real_aborts_(true), |
| 55 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) { |
| 56 if (isolate() != NULL) { |
| 57 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
| 58 isolate()); |
| 59 } |
| 60 } |
| 61 |
| 62 |
| 63 void MacroAssembler::LogicalMacro(const Register& rd, |
| 64 const Register& rn, |
| 65 const Operand& operand, |
| 66 LogicalOp op) { |
| 67 if (operand.NeedsRelocation()) { |
| 68 LoadRelocated(Tmp0(), operand); |
| 69 Logical(rd, rn, Tmp0(), op); |
| 70 |
| 71 } else if (operand.IsImmediate()) { |
| 72 int64_t immediate = operand.immediate(); |
| 73 unsigned reg_size = rd.SizeInBits(); |
| 74 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
| 75 |
| 76 // If the operation is NOT, invert the operation and immediate. |
| 77 if ((op & NOT) == NOT) { |
| 78 op = static_cast<LogicalOp>(op & ~NOT); |
| 79 immediate = ~immediate; |
| 80 if (rd.Is32Bits()) { |
| 81 immediate &= kWRegMask; |
| 82 } |
| 83 } |
| 84 |
| 85 // Special cases for all set or all clear immediates. |
| 86 if (immediate == 0) { |
| 87 switch (op) { |
| 88 case AND: |
| 89 Mov(rd, 0); |
| 90 return; |
| 91 case ORR: // Fall through. |
| 92 case EOR: |
| 93 Mov(rd, rn); |
| 94 return; |
| 95 case ANDS: // Fall through. |
| 96 case BICS: |
| 97 break; |
| 98 default: |
| 99 UNREACHABLE(); |
| 100 } |
| 101 } else if ((rd.Is64Bits() && (immediate == -1L)) || |
| 102 (rd.Is32Bits() && (immediate == 0xffffffffL))) { |
| 103 switch (op) { |
| 104 case AND: |
| 105 Mov(rd, rn); |
| 106 return; |
| 107 case ORR: |
| 108 Mov(rd, immediate); |
| 109 return; |
| 110 case EOR: |
| 111 Mvn(rd, rn); |
| 112 return; |
| 113 case ANDS: // Fall through. |
| 114 case BICS: |
| 115 break; |
| 116 default: |
| 117 UNREACHABLE(); |
| 118 } |
| 119 } |
| 120 |
| 121 unsigned n, imm_s, imm_r; |
| 122 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
| 123 // Immediate can be encoded in the instruction. |
| 124 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
| 125 } else { |
| 126 // Immediate can't be encoded: synthesize using move immediate. |
| 127 Register temp = AppropriateTempFor(rn); |
| 128 Mov(temp, immediate); |
| 129 if (rd.Is(csp)) { |
| 130 // If rd is the stack pointer we cannot use it as the destination |
| 131 // register so we use the temp register as an intermediate again. |
| 132 Logical(temp, rn, temp, op); |
| 133 Mov(csp, temp); |
| 134 } else { |
| 135 Logical(rd, rn, temp, op); |
| 136 } |
| 137 } |
| 138 |
| 139 } else if (operand.IsExtendedRegister()) { |
| 140 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 141 // Add/sub extended supports shift <= 4. We want to support exactly the |
| 142 // same modes here. |
| 143 ASSERT(operand.shift_amount() <= 4); |
| 144 ASSERT(operand.reg().Is64Bits() || |
| 145 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 146 Register temp = AppropriateTempFor(rn, operand.reg()); |
| 147 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 148 operand.shift_amount()); |
| 149 Logical(rd, rn, temp, op); |
| 150 |
| 151 } else { |
| 152 // The operand can be encoded in the instruction. |
| 153 ASSERT(operand.IsShiftedRegister()); |
| 154 Logical(rd, rn, operand, op); |
| 155 } |
| 156 } |
| 157 |
| 158 |
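The immediate special cases above reduce to a small table. A standalone sketch (plain C++, not part of V8; names are illustrative) of the reductions applied when the logical immediate is all-zeros or all-ones:

```cpp
// Sketch of the LogicalMacro immediate special cases (illustrative only).
#include <string>

enum class LogicalKind { AND, ORR, EOR, ANDS, BICS };

std::string SimplifyLogicalImmediate(LogicalKind op, bool all_ones) {
  if (!all_ones) {           // immediate == 0
    switch (op) {
      case LogicalKind::AND: return "mov rd, #0";   // x & 0 == 0
      case LogicalKind::ORR:
      case LogicalKind::EOR: return "mov rd, rn";   // x | 0 == x ^ 0 == x
      default:               return "no shortcut (flags must be set)";
    }
  } else {                   // immediate == all ones
    switch (op) {
      case LogicalKind::AND: return "mov rd, rn";   // x & ~0 == x
      case LogicalKind::ORR: return "mov rd, #~0";  // x | ~0 == ~0
      case LogicalKind::EOR: return "mvn rd, rn";   // x ^ ~0 == ~x
      default:               return "no shortcut (flags must be set)";
    }
  }
}
```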
| 159 void MacroAssembler::Mov(const Register& rd, uint64_t imm) { |
| 160 ASSERT(allow_macro_instructions_); |
| 161 ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); |
| 162 ASSERT(!rd.IsZero()); |
| 163 |
| 164 // TODO(all) extend to support more immediates. |
| 165 // |
| 166 // Immediates on AArch64 can be produced using an initial value, and zero to |
| 167 // three move-keep operations. |
| 168 // |
| 169 // Initial values can be generated with: |
| 170 // 1. 64-bit move zero (movz). |
| 171 // 2. 32-bit move inverted (movn). |
| 172 // 3. 64-bit move inverted. |
| 173 // 4. 32-bit orr immediate. |
| 174 // 5. 64-bit orr immediate. |
| 175 // Move-keep may then be used to modify each of the 16-bit half-words. |
| 176 // |
| 177 // The code below supports all five initial value generators, and |
| 178 // applying move-keep operations to move-zero and move-inverted initial |
| 179 // values. |
| 180 |
| 181 unsigned reg_size = rd.SizeInBits(); |
| 182 unsigned n, imm_s, imm_r; |
| 183 if (IsImmMovz(imm, reg_size) && !rd.IsSP()) { |
| 184 // Immediate can be represented in a move zero instruction. Movz can't |
| 185 // write to the stack pointer. |
| 186 movz(rd, imm); |
| 187 } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) { |
| 188 // Immediate can be represented in a move inverted instruction. Movn can't |
| 189 // write to the stack pointer. |
| 190 movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask)); |
| 191 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { |
| 192 // Immediate can be represented in a logical orr instruction. |
| 193 LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR); |
| 194 } else { |
| 195 // Generic immediate case. Imm will be represented by |
| 196 // [imm3, imm2, imm1, imm0], where each imm is 16 bits. |
| 197 // A move-zero or move-inverted is generated for the first non-zero or |
| 198 // non-0xffff immX, and a move-keep for subsequent non-zero immX. |
| 199 |
| 200 uint64_t ignored_halfword = 0; |
| 201 bool invert_move = false; |
| 202 // If the number of 0xffff halfwords is greater than the number of 0x0000 |
| 203 // halfwords, it's more efficient to use move-inverted. |
| 204 if (CountClearHalfWords(~imm, reg_size) > |
| 205 CountClearHalfWords(imm, reg_size)) { |
| 206 ignored_halfword = 0xffffL; |
| 207 invert_move = true; |
| 208 } |
| 209 |
| 210 // Mov instructions can't move a value into the stack pointer, so set up a |
| 211 // temporary register, if needed. |
| 212 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd; |
| 213 |
| 214 // Iterate through the halfwords. Use movn/movz for the first non-ignored |
| 215 // halfword, and movk for subsequent halfwords. |
| 216 ASSERT((reg_size % 16) == 0); |
| 217 bool first_mov_done = false; |
| 218 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { |
| 219 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; |
| 220 if (imm16 != ignored_halfword) { |
| 221 if (!first_mov_done) { |
| 222 if (invert_move) { |
| 223 movn(temp, (~imm16) & 0xffffL, 16 * i); |
| 224 } else { |
| 225 movz(temp, imm16, 16 * i); |
| 226 } |
| 227 first_mov_done = true; |
| 228 } else { |
| 229 // Construct a wider constant. |
| 230 movk(temp, imm16, 16 * i); |
| 231 } |
| 232 } |
| 233 } |
| 234 ASSERT(first_mov_done); |
| 235 |
| 236 // Move the temporary if the original destination register was the stack |
| 237 // pointer. |
| 238 if (rd.IsSP()) { |
| 239 mov(rd, temp); |
| 240 } |
| 241 } |
| 242 } |
| 243 |
| 244 |
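The generic-immediate path above can be exercised outside the assembler. A minimal standalone sketch (not V8 code; `SynthesizeImm64` is a hypothetical helper returning textual instructions) of the halfword bookkeeping: pick movz or movn for the first halfword that differs from the ignored pattern, then patch the rest with movk:

```cpp
// Sketch of the generic 64-bit immediate synthesis (illustrative only).
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> SynthesizeImm64(uint64_t imm) {
  int zero_halfwords = 0, ones_halfwords = 0;
  for (int i = 0; i < 4; i++) {
    unsigned h = (imm >> (16 * i)) & 0xffff;
    if (h == 0x0000) zero_halfwords++;
    if (h == 0xffff) ones_halfwords++;
  }
  // Prefer a movn-based sequence when more halfwords are 0xffff than 0x0000.
  const bool invert = ones_halfwords > zero_halfwords;
  const unsigned ignored = invert ? 0xffff : 0x0000;

  std::vector<std::string> seq;
  char buf[64];
  for (int i = 0; i < 4; i++) {
    unsigned h = (imm >> (16 * i)) & 0xffff;
    if (h == ignored) continue;
    if (seq.empty()) {
      // First non-ignored halfword: movz, or movn with the halfword inverted.
      std::snprintf(buf, sizeof(buf), "%s x0, #0x%x, lsl #%d",
                    invert ? "movn" : "movz", invert ? (~h & 0xffff) : h,
                    16 * i);
    } else {
      // Subsequent halfwords are patched in with movk.
      std::snprintf(buf, sizeof(buf), "movk x0, #0x%x, lsl #%d", h, 16 * i);
    }
    seq.push_back(buf);
  }
  // Every halfword matched the ignored pattern: the value is 0 or ~0.
  if (seq.empty()) seq.push_back(invert ? "movn x0, #0x0" : "movz x0, #0x0");
  return seq;
}

// For example, 0xffff0000ffff1234 yields:
//   movn x0, #0xedcb, lsl #0
//   movk x0, #0x0, lsl #32
```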
| 245 void MacroAssembler::Mov(const Register& rd, |
| 246 const Operand& operand, |
| 247 DiscardMoveMode discard_mode) { |
| 248 ASSERT(allow_macro_instructions_); |
| 249 ASSERT(!rd.IsZero()); |
| 250 // Provide a swap register for instructions that need to write into the |
| 251 // system stack pointer (and can't do this inherently). |
| 252 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd); |
| 253 |
| 254 if (operand.NeedsRelocation()) { |
| 255 LoadRelocated(dst, operand); |
| 256 |
| 257 } else if (operand.IsImmediate()) { |
| 258 // Call the macro assembler for generic immediates. |
| 259 Mov(dst, operand.immediate()); |
| 260 |
| 261 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 262 // Emit a shift instruction if moving a shifted register. This operation |
| 263 // could also be achieved using an orr instruction (like orn used by Mvn), |
| 264 // but using a shift instruction makes the disassembly clearer. |
| 265 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount()); |
| 266 |
| 267 } else if (operand.IsExtendedRegister()) { |
| 268 // Emit an extend instruction if moving an extended register. This handles |
| 269 // extend with post-shift operations, too. |
| 270 EmitExtendShift(dst, operand.reg(), operand.extend(), |
| 271 operand.shift_amount()); |
| 272 |
| 273 } else { |
| 274 // Otherwise, emit a register move only if the registers are distinct, or |
| 275 // if they are not X registers. |
| 276 // |
| 277 // Note that mov(w0, w0) is not a no-op because it clears the top word of |
| 278 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W |
| 279 // registers is not required to clear the top word of the X register. In |
| 280 // this case, the instruction is discarded. |
| 281 // |
| 282 // If csp is an operand, add #0 is emitted, otherwise, orr #0. |
| 283 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && |
| 284 (discard_mode == kDontDiscardForSameWReg))) { |
| 285 Assembler::mov(rd, operand.reg()); |
| 286 } |
| 287 // This case can handle writes into the system stack pointer directly. |
| 288 dst = rd; |
| 289 } |
| 290 |
| 291 // Copy the result to the system stack pointer. |
| 292 if (!dst.Is(rd)) { |
| 293 ASSERT(rd.IsSP()); |
| 294 ASSERT(dst.Is(Tmp1())); |
| 295 Assembler::mov(rd, dst); |
| 296 } |
| 297 } |
| 298 |
| 299 |
| 300 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
| 301 ASSERT(allow_macro_instructions_); |
| 302 |
| 303 if (operand.NeedsRelocation()) { |
| 304 LoadRelocated(Tmp0(), operand); |
| 305 Mvn(rd, Tmp0()); |
| 306 |
| 307 } else if (operand.IsImmediate()) { |
| 308 // Call the macro assembler for generic immediates. |
| 309 Mov(rd, ~operand.immediate()); |
| 310 |
| 311 } else if (operand.IsExtendedRegister()) { |
| 312 // Emit two instructions for the extend case. This differs from Mov, as |
| 313 // the extend and invert can't be achieved in one instruction. |
| 314 Register temp = AppropriateTempFor(rd, operand.reg()); |
| 315 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 316 operand.shift_amount()); |
| 317 mvn(rd, temp); |
| 318 |
| 319 } else { |
| 320 // Otherwise, emit a register move only if the registers are distinct. |
| 321 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0. |
| 322 mvn(rd, operand); |
| 323 } |
| 324 } |
| 325 |
| 326 |
| 327 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { |
| 328 ASSERT((reg_size % 8) == 0); |
| 329 int count = 0; |
| 330 for (unsigned i = 0; i < (reg_size / 16); i++) { |
| 331 if ((imm & 0xffff) == 0) { |
| 332 count++; |
| 333 } |
| 334 imm >>= 16; |
| 335 } |
| 336 return count; |
| 337 } |
| 338 |
| 339 |
| 340 // The movz instruction can generate immediates containing an arbitrary 16-bit |
| 341 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000. |
| 342 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { |
| 343 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); |
| 344 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); |
| 345 } |
| 346 |
| 347 |
| 348 // The movn instruction can generate immediates containing an arbitrary 16-bit |
| 349 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff. |
| 350 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { |
| 351 return IsImmMovz(~imm, reg_size); |
| 352 } |
| 353 |
| 354 |
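A standalone restatement (plain C++, not V8 code) of the three helpers above, with a few example immediates showing which values are movz- or movn-encodable:

```cpp
// Sketch of the movz/movn encodability predicates (illustrative only).
#include <cassert>
#include <cstdint>

int CountClearHalfWords64(uint64_t imm) {
  int count = 0;
  for (int i = 0; i < 4; i++) {
    if (((imm >> (16 * i)) & 0xffff) == 0) count++;
  }
  return count;
}

// movz-encodable: at most one 16-bit halfword is non-zero.
bool IsMovzImm64(uint64_t imm) { return CountClearHalfWords64(imm) >= 3; }
// movn-encodable: at most one halfword differs from 0xffff.
bool IsMovnImm64(uint64_t imm) { return IsMovzImm64(~imm); }

int main() {
  assert(IsMovzImm64(0x0000000000001234));   // one non-zero halfword
  assert(IsMovzImm64(0x0000123400000000));   // shifted halfword
  assert(!IsMovzImm64(0x0000123400001234));  // two non-zero halfwords
  assert(IsMovnImm64(0xffffffffffff1234));   // one halfword differs from 0xffff
  return 0;
}
```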
| 355 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
| 356 const Operand& operand, |
| 357 StatusFlags nzcv, |
| 358 Condition cond, |
| 359 ConditionalCompareOp op) { |
| 360 ASSERT((cond != al) && (cond != nv)); |
| 361 if (operand.NeedsRelocation()) { |
| 362 LoadRelocated(Tmp0(), operand); |
| 363 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op); |
| 364 |
| 365 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
| 366 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
| 367 // The immediate can be encoded in the instruction, or the operand is an |
| 368 // unshifted register: call the assembler. |
| 369 ConditionalCompare(rn, operand, nzcv, cond, op); |
| 370 |
| 371 } else { |
| 372 // The operand isn't directly supported by the instruction: perform the |
| 373 // operation on a temporary register. |
| 374 Register temp = AppropriateTempFor(rn); |
| 375 Mov(temp, operand); |
| 376 ConditionalCompare(rn, temp, nzcv, cond, op); |
| 377 } |
| 378 } |
| 379 |
| 380 |
| 381 void MacroAssembler::Csel(const Register& rd, |
| 382 const Register& rn, |
| 383 const Operand& operand, |
| 384 Condition cond) { |
| 385 ASSERT(allow_macro_instructions_); |
| 386 ASSERT(!rd.IsZero()); |
| 387 ASSERT((cond != al) && (cond != nv)); |
| 388 if (operand.IsImmediate()) { |
| 389 // Immediate argument. Handle special cases of 0, 1 and -1 using zero |
| 390 // register. |
| 391 int64_t imm = operand.immediate(); |
| 392 Register zr = AppropriateZeroRegFor(rn); |
| 393 if (imm == 0) { |
| 394 csel(rd, rn, zr, cond); |
| 395 } else if (imm == 1) { |
| 396 csinc(rd, rn, zr, cond); |
| 397 } else if (imm == -1) { |
| 398 csinv(rd, rn, zr, cond); |
| 399 } else { |
| 400 Register temp = AppropriateTempFor(rn); |
| 401 Mov(temp, operand.immediate()); |
| 402 csel(rd, rn, temp, cond); |
| 403 } |
| 404 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { |
| 405 // Unshifted register argument. |
| 406 csel(rd, rn, operand.reg(), cond); |
| 407 } else { |
| 408 // All other arguments. |
| 409 Register temp = AppropriateTempFor(rn); |
| 410 Mov(temp, operand); |
| 411 csel(rd, rn, temp, cond); |
| 412 } |
| 413 } |
| 414 |
| 415 |
| 416 void MacroAssembler::AddSubMacro(const Register& rd, |
| 417 const Register& rn, |
| 418 const Operand& operand, |
| 419 FlagsUpdate S, |
| 420 AddSubOp op) { |
| 421 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
| 422 !operand.NeedsRelocation() && (S == LeaveFlags)) { |
| 423 // The instruction would be a nop. Avoid generating useless code. |
| 424 return; |
| 425 } |
| 426 |
| 427 if (operand.NeedsRelocation()) { |
| 428 LoadRelocated(Tmp0(), operand); |
| 429 AddSubMacro(rd, rn, Tmp0(), S, op); |
| 430 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
| 431 (rn.IsZero() && !operand.IsShiftedRegister()) || |
| 432 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 433 Register temp = AppropriateTempFor(rn); |
| 434 Mov(temp, operand); |
| 435 AddSub(rd, rn, temp, S, op); |
| 436 } else { |
| 437 AddSub(rd, rn, operand, S, op); |
| 438 } |
| 439 } |
| 440 |
| 441 |
| 442 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
| 443 const Register& rn, |
| 444 const Operand& operand, |
| 445 FlagsUpdate S, |
| 446 AddSubWithCarryOp op) { |
| 447 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 448 |
| 449 if (operand.NeedsRelocation()) { |
| 450 LoadRelocated(Tmp0(), operand); |
| 451 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op); |
| 452 |
| 453 } else if (operand.IsImmediate() || |
| 454 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 455 // Add/sub with carry (immediate or ROR shifted register). |
| 456 Register temp = AppropriateTempFor(rn); |
| 457 Mov(temp, operand); |
| 458 AddSubWithCarry(rd, rn, temp, S, op); |
| 459 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 460 // Add/sub with carry (shifted register). |
| 461 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 462 ASSERT(operand.shift() != ROR); |
| 463 ASSERT(is_uintn(operand.shift_amount(), |
| 464 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); |
| 465 Register temp = AppropriateTempFor(rn, operand.reg()); |
| 466 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); |
| 467 AddSubWithCarry(rd, rn, temp, S, op); |
| 468 |
| 469 } else if (operand.IsExtendedRegister()) { |
| 470 // Add/sub with carry (extended register). |
| 471 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 472 // Add/sub extended supports a shift <= 4. We want to support exactly the |
| 473 // same modes. |
| 474 ASSERT(operand.shift_amount() <= 4); |
| 475 ASSERT(operand.reg().Is64Bits() || |
| 476 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 477 Register temp = AppropriateTempFor(rn, operand.reg()); |
| 478 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 479 operand.shift_amount()); |
| 480 AddSubWithCarry(rd, rn, temp, S, op); |
| 481 |
| 482 } else { |
| 483 // The addressing mode is directly supported by the instruction. |
| 484 AddSubWithCarry(rd, rn, operand, S, op); |
| 485 } |
| 486 } |
| 487 |
| 488 |
| 489 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, |
| 490 const MemOperand& addr, |
| 491 LoadStoreOp op) { |
| 492 int64_t offset = addr.offset(); |
| 493 LSDataSize size = CalcLSDataSize(op); |
| 494 |
| 495 // Check if an immediate offset fits in the immediate field of the |
| 496 // appropriate instruction. If not, emit two instructions to perform |
| 497 // the operation. |
| 498 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && |
| 499 !IsImmLSUnscaled(offset)) { |
| 500 // Immediate offset that can't be encoded using unsigned or unscaled |
| 501 // addressing modes. |
| 502 Register temp = AppropriateTempFor(addr.base()); |
| 503 Mov(temp, addr.offset()); |
| 504 LoadStore(rt, MemOperand(addr.base(), temp), op); |
| 505 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { |
| 506 // Post-index beyond unscaled addressing range. |
| 507 LoadStore(rt, MemOperand(addr.base()), op); |
| 508 add(addr.base(), addr.base(), offset); |
| 509 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { |
| 510 // Pre-index beyond unscaled addressing range. |
| 511 add(addr.base(), addr.base(), offset); |
| 512 LoadStore(rt, MemOperand(addr.base()), op); |
| 513 } else { |
| 514 // Encodable in one load/store instruction. |
| 515 LoadStore(rt, addr, op); |
| 516 } |
| 517 } |
| 518 |
| 519 |
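For reference, a standalone sketch (not V8 code) of the two immediate-offset forms the macro chooses between; offsets that fit neither are materialized in a register, as in the first branch above:

```cpp
// Sketch of the A64 load/store immediate-offset ranges (illustrative only).
#include <cstdint>

bool IsScaledOffset(int64_t offset, unsigned access_size_log2) {
  // Scaled form: unsigned 12-bit offset, a multiple of the access size.
  int64_t unit = int64_t{1} << access_size_log2;
  return offset >= 0 && (offset % unit) == 0 && (offset / unit) < (1 << 12);
}

bool IsUnscaledOffset(int64_t offset) {
  // Unscaled form: signed 9-bit offset, i.e. [-256, 255].
  return offset >= -256 && offset <= 255;
}
```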
| 520 // Pseudo-instructions. |
| 521 |
| 522 |
| 523 void MacroAssembler::Abs(const Register& rd, const Register& rm, |
| 524 Label * is_not_representable, |
| 525 Label * is_representable) { |
| 526 ASSERT(allow_macro_instructions_); |
| 527 ASSERT(AreSameSizeAndType(rd, rm)); |
| 528 |
| 529 Cmp(rm, 1); |
| 530 Cneg(rd, rm, lt); |
| 531 |
| 532 // If the comparison set the v flag, the input was the smallest value |
| 533 // representable by rm, and the mathematical result of abs(rm) is not |
| 534 // representable using two's complement. |
| 535 if ((is_not_representable != NULL) && (is_representable != NULL)) { |
| 536 B(is_not_representable, vs); |
| 537 B(is_representable); |
| 538 } else if (is_not_representable != NULL) { |
| 539 B(is_not_representable, vs); |
| 540 } else if (is_representable != NULL) { |
| 541 B(is_representable, vc); |
| 542 } |
| 543 } |
| 544 |
| 545 |
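A small standalone illustration (plain C++, not V8 code) of why Abs needs the is_not_representable path: the most negative two's-complement value is the one input whose absolute value cannot be represented:

```cpp
// Sketch of the representability check behind Abs (illustrative only).
#include <cassert>
#include <cstdint>
#include <limits>

bool AbsIsRepresentable(int64_t x) {
  // For every value except INT64_MIN, -x still fits in an int64_t.
  return x != std::numeric_limits<int64_t>::min();
}

int main() {
  assert(AbsIsRepresentable(-42));
  assert(AbsIsRepresentable(0));
  assert(!AbsIsRepresentable(std::numeric_limits<int64_t>::min()));
  return 0;
}
```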
| 546 // Abstracted stack operations. |
| 547 |
| 548 |
| 549 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, |
| 550 const CPURegister& src2, const CPURegister& src3) { |
| 551 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); |
| 552 ASSERT(src0.IsValid()); |
| 553 |
| 554 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); |
| 555 int size = src0.SizeInBytes(); |
| 556 |
| 557 PrepareForPush(count, size); |
| 558 PushHelper(count, size, src0, src1, src2, src3); |
| 559 } |
| 560 |
| 561 |
| 562 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, |
| 563 const CPURegister& dst2, const CPURegister& dst3) { |
| 564 // It is not valid to pop into the same register more than once in one |
| 565 // instruction, not even into the zero register. |
| 566 ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); |
| 567 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); |
| 568 ASSERT(dst0.IsValid()); |
| 569 |
| 570 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); |
| 571 int size = dst0.SizeInBytes(); |
| 572 |
| 573 PrepareForPop(count, size); |
| 574 PopHelper(count, size, dst0, dst1, dst2, dst3); |
| 575 |
| 576 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| 577 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| 578 // but if we keep it matching StackPointer, the simulator can detect memory |
| 579 // accesses in the now-free part of the stack. |
| 580 Mov(csp, StackPointer()); |
| 581 } |
| 582 } |
| 583 |
| 584 |
| 585 void MacroAssembler::PushCPURegList(CPURegList registers) { |
| 586 int size = registers.RegisterSizeInBytes(); |
| 587 |
| 588 PrepareForPush(registers.Count(), size); |
| 589 // Push up to four registers at a time because if the current stack pointer is |
| 590 // csp and reg_size is 32, registers must be pushed in blocks of four in order |
| 591 // to maintain the 16-byte alignment for csp. |
| 592 while (!registers.IsEmpty()) { |
| 593 int count_before = registers.Count(); |
| 594 const CPURegister& src0 = registers.PopHighestIndex(); |
| 595 const CPURegister& src1 = registers.PopHighestIndex(); |
| 596 const CPURegister& src2 = registers.PopHighestIndex(); |
| 597 const CPURegister& src3 = registers.PopHighestIndex(); |
| 598 int count = count_before - registers.Count(); |
| 599 PushHelper(count, size, src0, src1, src2, src3); |
| 600 } |
| 601 } |
| 602 |
| 603 |
| 604 void MacroAssembler::PopCPURegList(CPURegList registers) { |
| 605 int size = registers.RegisterSizeInBytes(); |
| 606 |
| 607 PrepareForPop(registers.Count(), size); |
| 608 // Pop up to four registers at a time because if the current stack pointer is |
| 609 // csp and reg_size is 32, registers must be pushed in blocks of four in |
| 610 // order to maintain the 16-byte alignment for csp. |
| 611 while (!registers.IsEmpty()) { |
| 612 int count_before = registers.Count(); |
| 613 const CPURegister& dst0 = registers.PopLowestIndex(); |
| 614 const CPURegister& dst1 = registers.PopLowestIndex(); |
| 615 const CPURegister& dst2 = registers.PopLowestIndex(); |
| 616 const CPURegister& dst3 = registers.PopLowestIndex(); |
| 617 int count = count_before - registers.Count(); |
| 618 PopHelper(count, size, dst0, dst1, dst2, dst3); |
| 619 } |
| 620 |
| 621 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| 622 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| 623 // but if we keep it matching StackPointer, the simulator can detect memory |
| 624 // accesses in the now-free part of the stack. |
| 625 Mov(csp, StackPointer()); |
| 626 } |
| 627 } |
| 628 |
| 629 |
| 630 void MacroAssembler::PushMultipleTimes(int count, Register src) { |
| 631 int size = src.SizeInBytes(); |
| 632 |
| 633 PrepareForPush(count, size); |
| 634 // Push up to four registers at a time if possible because if the current |
| 635 // stack pointer is csp and the register size is 32, registers must be pushed |
| 636 // in blocks of four in order to maintain the 16-byte alignment for csp. |
| 637 while (count >= 4) { |
| 638 PushHelper(4, size, src, src, src, src); |
| 639 count -= 4; |
| 640 } |
| 641 if (count >= 2) { |
| 642 PushHelper(2, size, src, src, NoReg, NoReg); |
| 643 count -= 2; |
| 644 } |
| 645 if (count == 1) { |
| 646 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
| 647 count -= 1; |
| 648 } |
| 649 ASSERT(count == 0); |
| 650 } |
| 651 |
| 652 |
| 653 void MacroAssembler::PushHelper(int count, int size, |
| 654 const CPURegister& src0, |
| 655 const CPURegister& src1, |
| 656 const CPURegister& src2, |
| 657 const CPURegister& src3) { |
| 658 // Ensure that we don't unintentionally modify scratch or debug registers. |
| 659 InstructionAccurateScope scope(this); |
| 660 |
| 661 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); |
| 662 ASSERT(size == src0.SizeInBytes()); |
| 663 |
| 664 // When pushing multiple registers, the store order is chosen such that |
| 665 // Push(a, b) is equivalent to Push(a) followed by Push(b). |
| 666 switch (count) { |
| 667 case 1: |
| 668 ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); |
| 669 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); |
| 670 break; |
| 671 case 2: |
| 672 ASSERT(src2.IsNone() && src3.IsNone()); |
| 673 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); |
| 674 break; |
| 675 case 3: |
| 676 ASSERT(src3.IsNone()); |
| 677 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); |
| 678 str(src0, MemOperand(StackPointer(), 2 * size)); |
| 679 break; |
| 680 case 4: |
| 681 // Skip over 4 * size, then fill in the gap. This allows four W registers |
| 682 // to be pushed using csp, whilst maintaining 16-byte alignment for csp |
| 683 // at all times. |
| 684 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex)); |
| 685 stp(src1, src0, MemOperand(StackPointer(), 2 * size)); |
| 686 break; |
| 687 default: |
| 688 UNREACHABLE(); |
| 689 } |
| 690 } |
| 691 |
| 692 |
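The store order and the block-of-four trick above can be checked against a tiny model. A standalone sketch (not V8 code) of a descending stack, showing that the block form leaves the same memory image as four single pushes while only ever adjusting the stack pointer by a 16-byte multiple:

```cpp
// Sketch of PushHelper's store order and alignment (illustrative only).
#include <cassert>
#include <cstdint>
#include <map>

struct FakeStack {
  uint64_t sp = 0x1000;              // arbitrary, 16-byte aligned
  std::map<uint64_t, uint64_t> mem;  // address -> stored value

  void PushOne(uint64_t value, int size) {
    sp -= size;
    mem[sp] = value;
  }
  // Mirrors the case 4 path: pre-decrement by the whole block, then fill in.
  void PushFour(uint64_t a, uint64_t b, uint64_t c, uint64_t d, int size) {
    sp -= 4 * size;                  // one 16-byte-aligned adjustment
    mem[sp + 3 * size] = a;          // 'a' ends up at the highest address
    mem[sp + 2 * size] = b;
    mem[sp + 1 * size] = c;
    mem[sp + 0 * size] = d;
  }
};

int main() {
  FakeStack one_at_a_time, block;
  for (uint64_t v : {1u, 2u, 3u, 4u}) one_at_a_time.PushOne(v, 4);
  block.PushFour(1, 2, 3, 4, 4);
  assert(one_at_a_time.mem == block.mem);  // same layout as single pushes
  assert(block.sp % 16 == 0);              // csp alignment never broken
  return 0;
}
```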
| 693 void MacroAssembler::PopHelper(int count, int size, |
| 694 const CPURegister& dst0, |
| 695 const CPURegister& dst1, |
| 696 const CPURegister& dst2, |
| 697 const CPURegister& dst3) { |
| 698 // Ensure that we don't unintentionally modify scratch or debug registers. |
| 699 InstructionAccurateScope scope(this); |
| 700 |
| 701 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); |
| 702 ASSERT(size == dst0.SizeInBytes()); |
| 703 |
| 704 // When popping multiple registers, the load order is chosen such that |
| 705 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). |
| 706 switch (count) { |
| 707 case 1: |
| 708 ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); |
| 709 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); |
| 710 break; |
| 711 case 2: |
| 712 ASSERT(dst2.IsNone() && dst3.IsNone()); |
| 713 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); |
| 714 break; |
| 715 case 3: |
| 716 ASSERT(dst3.IsNone()); |
| 717 ldr(dst2, MemOperand(StackPointer(), 2 * size)); |
| 718 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); |
| 719 break; |
| 720 case 4: |
| 721 // Load the higher addresses first, then load the lower addresses and |
| 722 // skip the whole block in the second instruction. This allows four W |
| 723 // registers to be popped using csp, whilst maintaining 16-byte alignment |
| 724 // for csp at all times. |
| 725 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); |
| 726 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); |
| 727 break; |
| 728 default: |
| 729 UNREACHABLE(); |
| 730 } |
| 731 } |
| 732 |
| 733 |
| 734 void MacroAssembler::PrepareForPush(int count, int size) { |
| 735 // TODO(jbramley): Use AssertStackConsistency here, if possible. See the |
| 736 // AssertStackConsistency for details of why we can't at the moment. |
| 737 if (csp.Is(StackPointer())) { |
| 738 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 739 // on entry and the total size of the specified registers must also be a |
| 740 // multiple of 16 bytes. |
| 741 ASSERT((count * size) % 16 == 0); |
| 742 } else { |
| 743 // Even if the current stack pointer is not the system stack pointer (csp), |
| 744 // the system stack pointer will still be modified in order to comply with |
| 745 // ABI rules about accessing memory below the system stack pointer. |
| 746 BumpSystemStackPointer(count * size); |
| 747 } |
| 748 } |
| 749 |
| 750 |
| 751 void MacroAssembler::PrepareForPop(int count, int size) { |
| 752 AssertStackConsistency(); |
| 753 if (csp.Is(StackPointer())) { |
| 754 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 755 // on entry and the total size of the specified registers must also be a |
| 756 // multiple of 16 bytes. |
| 757 ASSERT((count * size) % 16 == 0); |
| 758 } |
| 759 } |
| 760 |
| 761 |
| 762 void MacroAssembler::Poke(const Register& src, const Operand& offset) { |
| 763 if (offset.IsImmediate()) { |
| 764 ASSERT(offset.immediate() >= 0); |
| 765 } else if (emit_debug_code()) { |
| 766 Cmp(xzr, offset); |
| 767 Check(le, "Poke offset is negative."); |
| 768 } |
| 769 |
| 770 Str(src, MemOperand(StackPointer(), offset)); |
| 771 } |
| 772 |
| 773 |
| 774 void MacroAssembler::Peek(const Register& dst, const Operand& offset) { |
| 775 if (offset.IsImmediate()) { |
| 776 ASSERT(offset.immediate() >= 0); |
| 777 } else if (emit_debug_code()) { |
| 778 Cmp(xzr, offset); |
| 779 Check(le, "Peek offset is negative."); |
| 780 } |
| 781 |
| 782 Ldr(dst, MemOperand(StackPointer(), offset)); |
| 783 } |
| 784 |
| 785 |
| 786 void MacroAssembler::PushCalleeSavedRegisters() { |
| 787 // Ensure that the macro-assembler doesn't use any scratch registers. |
| 788 InstructionAccurateScope scope(this); |
| 789 |
| 790 // This method must not be called unless the current stack pointer is the |
| 791 // system stack pointer (csp). |
| 792 ASSERT(csp.Is(StackPointer())); |
| 793 |
| 794 MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex); |
| 795 |
| 796 stp(d14, d15, tos); |
| 797 stp(d12, d13, tos); |
| 798 stp(d10, d11, tos); |
| 799 stp(d8, d9, tos); |
| 800 |
| 801 stp(x29, x30, tos); |
| 802 stp(x27, x28, tos); // x28 = jssp |
| 803 stp(x25, x26, tos); |
| 804 stp(x23, x24, tos); |
| 805 stp(x21, x22, tos); |
| 806 stp(x19, x20, tos); |
| 807 } |
| 808 |
| 809 |
| 810 void MacroAssembler::PopCalleeSavedRegisters() { |
| 811 // Ensure that the macro-assembler doesn't use any scratch registers. |
| 812 InstructionAccurateScope scope(this); |
| 813 |
| 814 // This method must not be called unless the current stack pointer is the |
| 815 // system stack pointer (csp). |
| 816 ASSERT(csp.Is(StackPointer())); |
| 817 |
| 818 MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex); |
| 819 |
| 820 ldp(x19, x20, tos); |
| 821 ldp(x21, x22, tos); |
| 822 ldp(x23, x24, tos); |
| 823 ldp(x25, x26, tos); |
| 824 ldp(x27, x28, tos); // x28 = jssp |
| 825 ldp(x29, x30, tos); |
| 826 |
| 827 ldp(d8, d9, tos); |
| 828 ldp(d10, d11, tos); |
| 829 ldp(d12, d13, tos); |
| 830 ldp(d14, d15, tos); |
| 831 } |
| 832 |
| 833 |
| 834 void MacroAssembler::AssertStackConsistency() { |
| 835 if (emit_debug_code() && !csp.Is(StackPointer())) { |
| 836 if (csp.Is(StackPointer())) { |
| 837 // TODO(jbramley): Check for csp alignment if it is the stack pointer. |
| 838 } else { |
| 839 // TODO(jbramley): Currently we cannot use this assertion in Push because |
| 840 // some calling code assumes that the flags are preserved. For an example, |
| 841 // look at Builtins::Generate_ArgumentsAdaptorTrampoline. |
| 842 Cmp(csp, StackPointer()); |
| 843 Check(ls, "The current stack pointer is below csp."); |
| 844 } |
| 845 } |
| 846 } |
| 847 |
| 848 |
| 849 void MacroAssembler::LoadRoot(Register destination, |
| 850 Heap::RootListIndex index) { |
| 851 // TODO(jbramley): Most root values are constants, and can be synthesized |
| 852 // without a load. Refer to the ARM back end for details. |
| 853 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); |
| 854 } |
| 855 |
| 856 |
| 857 void MacroAssembler::StoreRoot(Register source, |
| 858 Heap::RootListIndex index) { |
| 859 Str(source, MemOperand(root, index << kPointerSizeLog2)); |
| 860 } |
| 861 |
| 862 |
| 863 void MacroAssembler::LoadTrueFalseRoots(Register true_root, |
| 864 Register false_root) { |
| 865 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex); |
| 866 Ldp(true_root, false_root, |
| 867 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2)); |
| 868 } |
| 869 |
| 870 |
| 871 void MacroAssembler::LoadHeapObject(Register result, |
| 872 Handle<HeapObject> object) { |
| 873 AllowDeferredHandleDereference using_raw_address; |
| 874 if (isolate()->heap()->InNewSpace(*object)) { |
| 875 Handle<JSGlobalPropertyCell> cell = |
| 876 isolate()->factory()->NewJSGlobalPropertyCell(object); |
| 877 Mov(result, Operand(cell)); |
| 878 Ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); |
| 879 } else { |
| 880 Mov(result, Operand(object)); |
| 881 } |
| 882 } |
| 883 |
| 884 |
| 885 void MacroAssembler::CheckForInvalidValuesInCalleeSavedRegs(RegList list) { |
| 886 if (emit_debug_code()) { |
| 887 // Only check for callee-saved registers. |
| 888 // TODO(jbramley): Why? We still don't want caller-saved registers to be |
| 889 // pushed with invalid values. Perhaps we need a |
| 890 // CheckForInvalidValuesInRegs for other cases. |
| 891 Label invalid, ok; |
| 892 list &= kJSCalleeSavedRegList; |
| 893 for (unsigned i = kFirstCalleeSavedRegisterIndex; list != 0; i++) { |
| 894 if (list & (1 << i)) { |
| 895 // Clear the current register from the list. |
| 896 list &= ~(1 << i); |
| 897 Register current = Register(i, kXRegSize); |
| 898 Label smi; |
| 899 JumpIfSmi(current, &smi); |
| 900 // TODO(all): Better check for invalid values in callee-saved registers. |
| 901 // Check that the register is not in [0, 4 KB]. |
| 902 // This catches odd (untagged) integers. |
| 903 // We should actually check that the pointer is valid. |
| 904 Cmp(current, 4 * KB); |
| 905 B(lo, &invalid); |
| 906 Bind(&smi); |
| 907 } |
| 908 } |
| 909 B(&ok); |
| 910 Bind(&invalid); |
| 911 Abort("Invalid value in a callee saved register."); |
| 912 Bind(&ok); |
| 913 } |
| 914 } |
| 915 |
| 916 |
| 917 void MacroAssembler::LoadInstanceDescriptors(Register map, |
| 918 Register descriptors) { |
| 919 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
| 920 } |
| 921 |
| 922 |
| 923 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
| 924 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
| 925 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
| 926 } |
| 927 |
| 928 |
| 929 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) { |
| 930 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
| 931 Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset)); |
| 932 And(dst, dst, Map::EnumLengthBits::kMask); |
| 933 } |
| 934 |
| 935 |
| 936 void MacroAssembler::EnumLengthSmi(Register dst, Register map) { |
| 937 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
| 938 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
| 939 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); |
| 940 } |
| 941 |
| 942 |
| 943 void MacroAssembler::CheckEnumCache(Register object, |
| 944 Register null_value, |
| 945 Register scratch0, |
| 946 Register scratch1, |
| 947 Register scratch2, |
| 948 Register scratch3, |
| 949 Label* call_runtime) { |
| 950 ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2, |
| 951 scratch3)); |
| 952 |
| 953 Register empty_fixed_array_value = scratch0; |
| 954 Register current_object = scratch1; |
| 955 |
| 956 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
| 957 Label next, start; |
| 958 |
| 959 Mov(current_object, object); |
| 960 |
| 961 // Check if the enum length field is properly initialized, indicating that |
| 962 // there is an enum cache. |
| 963 Register map = scratch2; |
| 964 Register enum_length = scratch3; |
| 965 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset)); |
| 966 |
| 967 EnumLengthUntagged(enum_length, map); |
| 968 Cmp(enum_length, Map::kInvalidEnumCache); |
| 969 B(eq, call_runtime); |
| 970 |
| 971 B(&start); |
| 972 |
| 973 Bind(&next); |
| 974 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset)); |
| 975 |
| 976 // For all objects but the receiver, check that the cache is empty. |
| 977 EnumLengthUntagged(enum_length, map); |
| 978 Cbnz(enum_length, call_runtime); |
| 979 |
| 980 Bind(&start); |
| 981 |
| 982 // Check that there are no elements. Register current_object contains the |
| 983 // current JS object we've reached through the prototype chain. |
| 984 Ldr(current_object, FieldMemOperand(current_object, |
| 985 JSObject::kElementsOffset)); |
| 986 Cmp(current_object, empty_fixed_array_value); |
| 987 B(ne, call_runtime); |
| 988 |
| 989 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 990 Cmp(current_object, null_value); |
| 991 B(ne, &next); |
| 992 } |
| 993 |
| 994 |
| 995 void MacroAssembler::TestJSArrayForAllocationSiteInfo(Register receiver, |
| 996 Register scratch1, |
| 997 Register scratch2) { |
| 998 Label no_info_available; |
| 999 ExternalReference new_space_start = |
| 1000 ExternalReference::new_space_start(isolate()); |
| 1001 ExternalReference new_space_allocation_top = |
| 1002 ExternalReference::new_space_allocation_top_address(isolate()); |
| 1003 |
| 1004 Add(scratch1, receiver, |
| 1005 JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag); |
| 1006 Cmp(scratch1, Operand(new_space_start)); |
| 1007 B(lt, &no_info_available); |
| 1008 |
| 1009 Mov(scratch2, Operand(new_space_allocation_top)); |
| 1010 Ldr(scratch2, MemOperand(scratch2)); |
| 1011 Cmp(scratch1, scratch2); |
| 1012 B(gt, &no_info_available); |
| 1013 |
| 1014 Ldr(scratch1, MemOperand(scratch1, -AllocationSiteInfo::kSize)); |
| 1015 Cmp(scratch1, |
| 1016 Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); |
| 1017 |
| 1018 Bind(&no_info_available); |
| 1019 } |
| 1020 |
| 1021 |
| 1022 void MacroAssembler::JumpToHandlerEntry(Register exception, |
| 1023 Register object, |
| 1024 Register state, |
| 1025 Register scratch1, |
| 1026 Register scratch2) { |
| 1027 // Handler expects argument in x0. |
| 1028 ASSERT(exception.Is(x0)); |
| 1029 |
| 1030 // Compute the handler entry address and jump to it. The handler table is |
| 1031 // a fixed array of (smi-tagged) code offsets. |
| 1032 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset)); |
| 1033 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); |
| 1034 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2); |
| 1035 Lsr(scratch2, state, StackHandler::kKindWidth); |
| 1036 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); |
| 1037 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); |
| 1038 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); |
| 1039 Br(scratch1); |
| 1040 } |
| 1041 |
| 1042 |
| 1043 void MacroAssembler::InNewSpace(Register object, |
| 1044 Condition cond, |
| 1045 Label* branch) { |
| 1046 ASSERT(cond == eq || cond == ne); |
| 1047 // Use Tmp1() to have a different destination register, as Tmp0() will be used |
| 1048 // for relocation. |
| 1049 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate()))); |
| 1050 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate()))); |
| 1051 B(cond, branch); |
| 1052 } |
| 1053 |
| 1054 |
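A standalone sketch (plain C++, not V8 code) of the containment test InNewSpace relies on, assuming the region's base is aligned to its power-of-two size:

```cpp
// Sketch of the mask-and-compare region test (illustrative only).
#include <cassert>
#include <cstdint>

bool InRegion(uint64_t address, uint64_t region_start, uint64_t region_size) {
  // Assumes region_size is a power of two and region_start is aligned to it,
  // so membership reduces to one AND and one compare.
  uint64_t mask = ~(region_size - 1);
  return (address & mask) == region_start;
}

int main() {
  const uint64_t start = 0x40000000, size = 0x10000000;  // illustrative values
  assert(InRegion(start + 42, start, size));
  assert(!InRegion(start + size, start, size));
  return 0;
}
```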
| 1055 void MacroAssembler::Throw(Register value, |
| 1056 Register scratch1, |
| 1057 Register scratch2, |
| 1058 Register scratch3, |
| 1059 Register scratch4) { |
| 1060 // Adjust this code if not the case. |
| 1061 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1062 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 1063 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1064 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1065 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 1066 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1067 |
| 1068 // The handler expects the exception in x0. |
| 1069 ASSERT(value.Is(x0)); |
| 1070 |
| 1071 // Drop the stack pointer to the top of the top handler. |
| 1072 ASSERT(jssp.Is(StackPointer())); |
| 1073 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, |
| 1074 isolate()))); |
| 1075 Ldr(jssp, MemOperand(scratch1)); |
| 1076 // Restore the next handler. |
| 1077 Pop(scratch2); |
| 1078 Str(scratch2, MemOperand(scratch1)); |
| 1079 |
| 1080 // Get the code object and state. Restore the context and frame pointer. |
| 1081 Register object = scratch1; |
| 1082 Register state = scratch2; |
| 1083 Pop(object, state, cp, fp); |
| 1084 |
| 1085 // If the handler is a JS frame, restore the context to the frame. |
| 1086 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp |
| 1087 // or cp. |
| 1088 Label not_js_frame; |
| 1089 Cbz(cp, ¬_js_frame); |
| 1090 Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1091 Bind(¬_js_frame); |
| 1092 |
| 1093 JumpToHandlerEntry(value, object, state, scratch3, scratch4); |
| 1094 } |
| 1095 |
| 1096 |
| 1097 void MacroAssembler::ThrowUncatchable(Register value, |
| 1098 Register scratch1, |
| 1099 Register scratch2, |
| 1100 Register scratch3, |
| 1101 Register scratch4) { |
| 1102 // Adjust this code if not the case. |
| 1103 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 1104 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 1105 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 1106 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 1107 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 1108 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1109 |
| 1110 // The handler expects the exception in x0. |
| 1111 ASSERT(value.Is(x0)); |
| 1112 |
| 1113 // Drop the stack pointer to the top of the top stack handler. |
| 1114 ASSERT(jssp.Is(StackPointer())); |
| 1115 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, |
| 1116 isolate()))); |
| 1117 Ldr(jssp, MemOperand(scratch1)); |
| 1118 |
| 1119 // Unwind the handlers until the ENTRY handler is found. |
| 1120 Label fetch_next, check_kind; |
| 1121 B(&check_kind); |
| 1122 Bind(&fetch_next); |
| 1123 Peek(jssp, StackHandlerConstants::kNextOffset); |
| 1124 |
| 1125 Bind(&check_kind); |
| 1126 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); |
| 1127 Peek(scratch2, StackHandlerConstants::kStateOffset); |
| 1128 TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next); |
| 1129 |
| 1130 // Set the top handler address to next handler past the top ENTRY handler. |
| 1131 Pop(scratch2); |
| 1132 Str(scratch2, MemOperand(scratch1)); |
| 1133 |
| 1134 // Get the code object and state. Clear the context and frame pointer (0 was |
| 1135 // saved in the handler). |
| 1136 Register object = scratch1; |
| 1137 Register state = scratch2; |
| 1138 Pop(object, state, cp, fp); |
| 1139 |
| 1140 JumpToHandlerEntry(value, object, state, scratch3, scratch4); |
| 1141 } |
| 1142 |
| 1143 |
| 1144 void MacroAssembler::SmiAbs(Register smi, Register scratch, Label *slow) { |
| 1145 // TODO(all): There is another possible implementation of this function |
| 1146 // which would consist of: |
| 1147 // * Comparing the smi with 0. |
| 1148 // * Performing a conditional negate (cneg). |
| 1149 // * Testing if the result is still negative. |
| 1150 // |
| 1151 // This other implementation uses 1 more instruction but uses one of the new |
| 1152 // A64 conditional instructions and doesn't use shifted registers. |
| 1153 // |
| 1154 // These two versions should be profiled on real hardware as we have no idea |
| 1155 // which one will be the fastest. |
| 1156 ASSERT(!AreAliased(smi, scratch)); |
| 1157 |
| 1158 STATIC_ASSERT(kSmiTag == 0); |
| 1159 STATIC_ASSERT(kSmiShift == 32); |
| 1160 |
| 1161 // Do bitwise not or do nothing depending on the sign of the argument. |
| 1162 __ Eor(scratch, smi, Operand(smi, ASR, kXRegSize - 1)); |
| 1163 // Add 1 or do nothing depending on the sign of the argument. |
| 1164 __ Adds(smi, scratch, Operand(smi, LSR, kXRegSize - 1)); |
| 1165 |
| 1166 // If the result is still negative, go to the slow case. |
| 1167 // This only happens for the most negative smi. |
| 1168 __ B(mi, slow); |
| 1169 } |
| 1170 |
| 1171 |
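A standalone restatement (plain C++, not V8 code, abstracting away the smi tag) of the eor/add trick used by SmiAbs, including the INT64_MIN case that stays negative and forces the slow path:

```cpp
// Sketch of the branchless absolute value used by SmiAbs (illustrative only).
#include <cassert>
#include <cstdint>
#include <limits>

int64_t BranchlessAbs(int64_t x) {
  uint64_t ux = static_cast<uint64_t>(x);
  uint64_t sign = static_cast<uint64_t>(x >> 63);  // ASR: 0 or all ones
  uint64_t low_bit = ux >> 63;                     // LSR: 0 or 1
  // Bitwise-not (via eor) for negative inputs, then add 1; wraps like the CPU.
  return static_cast<int64_t>((ux ^ sign) + low_bit);
}

int main() {
  assert(BranchlessAbs(5) == 5);
  assert(BranchlessAbs(-5) == 5);
  assert(BranchlessAbs(0) == 0);
  // The most negative value overflows, so the result is still negative.
  assert(BranchlessAbs(std::numeric_limits<int64_t>::min()) < 0);
  return 0;
}
```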
| 1172 void MacroAssembler::AssertSmi(Register object, char const* fail_message) { |
| 1173 if (emit_debug_code()) { |
| 1174 STATIC_ASSERT(kSmiTag == 0); |
| 1175 Tst(object, kSmiTagMask); |
| 1176 Check(eq, fail_message); |
| 1177 } |
| 1178 } |
| 1179 |
| 1180 |
| 1181 void MacroAssembler::AssertNotSmi(Register object, char const* fail_message) { |
| 1182 if (emit_debug_code()) { |
| 1183 STATIC_ASSERT(kSmiTag == 0); |
| 1184 Tst(object, kSmiTagMask); |
| 1185 Check(ne, fail_message); |
| 1186 } |
| 1187 } |
| 1188 |
| 1189 |
| 1190 void MacroAssembler::AssertName(Register object) { |
| 1191 if (emit_debug_code()) { |
| 1192 STATIC_ASSERT(kSmiTag == 0); |
| 1193 // TODO(jbramley): Add AbortIfSmi and related functions. |
| 1194 Label not_smi; |
| 1195 JumpIfNotSmi(object, ¬_smi); |
| 1196 Abort("Operand is a smi and not a name"); |
| 1197 Bind(¬_smi); |
| 1198 |
| 1199 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1200 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE); |
| 1201 Check(ls, "Operand is not a name"); |
| 1202 } |
| 1203 } |
| 1204 |
| 1205 |
| 1206 void MacroAssembler::AssertString(Register object) { |
| 1207 if (emit_debug_code()) { |
| 1208 Register temp = Tmp1(); |
| 1209 STATIC_ASSERT(kSmiTag == 0); |
| 1210 Tst(object, kSmiTagMask); |
| 1211 Check(ne, "Operand is a smi and not a string"); |
| 1212 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1213 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 1214 Check(lo, "Operand is not a string"); |
| 1215 } |
| 1216 } |
| 1217 |
| 1218 |
| 1219 void MacroAssembler::AssertRootValue(Register src, |
| 1220 Heap::RootListIndex root_value_index, |
| 1221 const char* message) { |
| 1222 if (emit_debug_code()) { |
| 1223 CompareRoot(src, root_value_index); |
| 1224 Check(eq, message); |
| 1225 } |
| 1226 } |
| 1227 |
| 1228 |
| 1229 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { |
| 1230 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. |
| 1231 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); |
| 1232 } |
| 1233 |
| 1234 |
| 1235 void MacroAssembler::TailCallStub(CodeStub* stub) { |
| 1236 ASSERT(allow_stub_calls_ || |
| 1237 stub->CompilingCallsToThisStubIsGCSafe(isolate())); |
| 1238 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); |
| 1239 } |
| 1240 |
| 1241 |
| 1242 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
| 1243 int num_arguments) { |
| 1244 // All arguments must be on the stack before this function is called. |
| 1245 // x0 holds the return value after the call. |
| 1246 |
| 1247 // Check that the number of arguments matches what the function expects. |
| 1248 // If f->nargs is -1, the function can accept a variable number of arguments. |
| 1249 if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 1250 // Illegal operation: drop the stack arguments and return undefined. |
| 1251 if (num_arguments > 0) { |
| 1252 Drop(num_arguments); |
| 1253 } |
| 1254 LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
| 1255 return; |
| 1256 } |
| 1257 |
| 1258 // Place the necessary arguments. |
| 1259 Mov(x0, num_arguments); |
| 1260 Mov(x1, Operand(ExternalReference(f, isolate()))); |
| 1261 |
| 1262 CEntryStub stub(1); |
| 1263 CallStub(&stub); |
| 1264 } |
| 1265 |
| 1266 |
| 1267 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
| 1268 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
| 1269 } |
| 1270 |
| 1271 |
| 1272 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { |
| 1273 const Runtime::Function* function = Runtime::FunctionForId(id); |
| 1274 |
| 1275 // Place the necessary arguments. |
| 1276 Mov(x0, function->nargs); |
| 1277 Mov(x1, Operand(ExternalReference(function, isolate()))); |
| 1278 |
| 1279 // TODO(all): Here we should ask CEntryStub to save floating point registers |
| 1280 // but this is not supported at the moment. |
| 1281 CEntryStub stub(1); |
| 1282 CallStub(&stub); |
| 1283 } |
| 1284 |
| 1285 |
| 1286 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { |
| 1287 return ref0.address() - ref1.address(); |
| 1288 } |
| 1289 |
| 1290 |
| 1291 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, |
| 1292 int stack_space, |
| 1293 int spill_offset, |
| 1294 bool returns_handle, |
| 1295 int return_value_offset_from_fp) { |
| 1296 ExternalReference next_address = |
| 1297 ExternalReference::handle_scope_next_address(isolate()); |
| 1298 const int kNextOffset = 0; |
| 1299 const int kLimitOffset = AddressOffset( |
| 1300 ExternalReference::handle_scope_limit_address(isolate()), |
| 1301 next_address); |
| 1302 const int kLevelOffset = AddressOffset( |
| 1303 ExternalReference::handle_scope_level_address(isolate()), |
| 1304 next_address); |
| 1305 |
| 1306 // Save the callee-save registers we are going to use. |
| 1307 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); |
| 1308 Poke(x19, (spill_offset + 0) * kXRegSizeInBytes); |
| 1309 Poke(x20, (spill_offset + 1) * kXRegSizeInBytes); |
| 1310 Poke(x21, (spill_offset + 2) * kXRegSizeInBytes); |
| 1311 Poke(x22, (spill_offset + 3) * kXRegSizeInBytes); |
| 1312 |
| 1313 // Allocate HandleScope in callee-save registers. |
| 1314 // We will need to restore the HandleScope after the call to the API function; |
| 1315 // allocating it in callee-save registers means the C code will preserve it. |
| 1316 Register handle_scope_base = x22; |
| 1317 Register next_address_reg = x19; |
| 1318 Register limit_reg = x20; |
| 1319 Register level_reg = w21; |
| 1320 Mov(handle_scope_base, Operand(next_address)); |
| 1321 Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); |
| 1322 Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); |
| 1323 |
| 1324 Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset)); |
| 1325 Add(level_reg, level_reg, 1); |
| 1326 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); |
| 1327 |
| 1328 if (FLAG_log_timer_events) { |
| 1329 FrameScope frame(this, StackFrame::MANUAL); |
| 1330 PushSafepointRegisters(); |
| 1331 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); |
| 1332 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); |
| 1333 PopSafepointRegisters(); |
| 1334 } |
| 1335 |
| 1336 // Native call returns to the DirectCEntry stub which redirects to the |
| 1337 // return address pushed on stack (could have moved after GC). |
| 1338 // DirectCEntry stub itself is generated early and never moves. |
| 1339 DirectCEntryStub stub; |
| 1340 __ Mov(x2, Operand(function)); |
| 1341 stub.GenerateCall(this, x2); |
| 1342 |
| 1343 if (FLAG_log_timer_events) { |
| 1344 FrameScope frame(this, StackFrame::MANUAL); |
| 1345 PushSafepointRegisters(); |
| 1346 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); |
| 1347 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); |
| 1348 PopSafepointRegisters(); |
| 1349 } |
| 1350 |
| 1351 Label promote_scheduled_exception; |
| 1352 Label delete_allocated_handles; |
| 1353 Label leave_exit_frame; |
| 1354 Label result_is_not_null; |
| 1355 Label return_value_loaded; |
| 1356 |
| 1357 if (returns_handle) { |
| 1358 Label load_return_value; |
| 1359 Cbz(x0, &load_return_value); |
| 1360 // Dereference returned value. |
| 1361 Ldr(x0, MemOperand(x0)); |
| 1362 B(&return_value_loaded); |
| 1363 Bind(&load_return_value); |
| 1364 } |
| 1365 // load value from ReturnValue |
| 1366 Ldr(x0, MemOperand(fp, return_value_offset_from_fp * kPointerSize)); |
| 1367 Bind(&return_value_loaded); |
| 1368 // No more valid handles (the result handle was the last one). Restore |
| 1369 // previous handle scope. |
| 1370 Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); |
| 1371 if (emit_debug_code()) { |
| 1372 Ldr(w1, MemOperand(handle_scope_base, kLevelOffset)); |
| 1373 Cmp(w1, level_reg); |
| 1374 Check(eq, "Unexpected level after return from api call"); |
| 1375 } |
| 1376 Sub(level_reg, level_reg, 1); |
| 1377 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); |
| 1378 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); |
| 1379 Cmp(limit_reg, x1); |
| 1380 B(ne, &delete_allocated_handles); |
| 1381 |
| 1382 Bind(&leave_exit_frame); |
| 1383 // Restore callee-saved registers. |
| 1384 Peek(x19, (spill_offset + 0) * kXRegSizeInBytes); |
| 1385 Peek(x20, (spill_offset + 1) * kXRegSizeInBytes); |
| 1386 Peek(x21, (spill_offset + 2) * kXRegSizeInBytes); |
| 1387 Peek(x22, (spill_offset + 3) * kXRegSizeInBytes); |
| 1388 |
| 1389 // Check if the function scheduled an exception. |
| 1390 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
| 1391 Ldr(x5, MemOperand(x5)); |
| 1392 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); |
| 1393 |
| 1394 LeaveExitFrame(false, x1); |
| 1395 Drop(stack_space); |
| 1396 Ret(); |
| 1397 |
| 1398 Bind(&promote_scheduled_exception); |
| 1399 TailCallExternalReference( |
| 1400 ExternalReference(Runtime::kPromoteScheduledException, isolate()), |
| 1401 0, |
| 1402 1); |
| 1403 |
| 1404 // HandleScope limit has changed. Delete allocated extensions. |
| 1405 Bind(&delete_allocated_handles); |
| 1406 Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); |
| 1407 // Save the return value in a callee-save register. |
| 1408 Register saved_result = x19; |
| 1409 Mov(saved_result, x0); |
| 1410 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); |
| 1411 CallCFunction( |
| 1412 ExternalReference::delete_handle_scope_extensions(isolate()), 1); |
| 1413 Mov(x0, saved_result); |
| 1414 B(&leave_exit_frame); |
| 1415 } |
| 1416 |
| 1417 |
| 1418 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
| 1419 int num_arguments) { |
| 1420 Mov(x0, num_arguments); |
| 1421 Mov(x1, Operand(ext)); |
| 1422 |
| 1423 CEntryStub stub(1); |
| 1424 CallStub(&stub); |
| 1425 } |
| 1426 |
| 1427 |
| 1428 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
| 1429 Mov(x1, Operand(builtin)); |
| 1430 CEntryStub stub(1); |
| 1431 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); |
| 1432 } |
| 1433 |
| 1434 |
| 1435 void MacroAssembler::GetBuiltinFunction(Register target, |
| 1436 Builtins::JavaScript id) { |
| 1437 // Load the builtins object into target register. |
| 1438 Ldr(target, GlobalObjectMemOperand()); |
| 1439 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
| 1440 // Load the JavaScript builtin function from the builtins object. |
| 1441 Ldr(target, FieldMemOperand(target, |
| 1442 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
| 1443 } |
| 1444 |
| 1445 |
| 1446 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
| 1447 ASSERT(!target.is(x1)); |
| 1448 GetBuiltinFunction(x1, id); |
| 1449 // Load the code entry point from the builtins object. |
| 1450 Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); |
| 1451 } |
| 1452 |
| 1453 |
| 1454 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 1455 InvokeFlag flag, |
| 1456 const CallWrapper& call_wrapper) { |
| 1457 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); |
| 1458 // You can't call a builtin without a valid frame. |
| 1459 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 1460 |
| 1461 GetBuiltinEntry(x2, id); |
| 1462 if (flag == CALL_FUNCTION) { |
| 1463 call_wrapper.BeforeCall(CallSize(x2)); |
| 1464 SetCallKind(x5, CALL_AS_METHOD); |
| 1465 Call(x2); |
| 1466 call_wrapper.AfterCall(); |
| 1467 } else { |
| 1468 ASSERT(flag == JUMP_FUNCTION); |
| 1469 SetCallKind(x5, CALL_AS_METHOD); |
| 1470 Jump(x2); |
| 1471 } |
| 1472 } |
| 1473 |
| 1474 |
| 1475 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
| 1476 int num_arguments, |
| 1477 int result_size) { |
| 1478 // TODO(1236192): Most runtime routines don't need the number of |
| 1479 // arguments passed in because it is constant. At some point we |
| 1480 // should remove this need and make the runtime routine entry code |
| 1481 // smarter. |
| 1482 Mov(x0, num_arguments); |
| 1483 JumpToExternalReference(ext); |
| 1484 } |
| 1485 |
| 1486 |
| 1487 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
| 1488 int num_arguments, |
| 1489 int result_size) { |
| 1490 TailCallExternalReference(ExternalReference(fid, isolate()), |
| 1491 num_arguments, |
| 1492 result_size); |
| 1493 } |
| 1494 |
| 1495 |
| 1496 void MacroAssembler::InitializeNewString(Register string, |
| 1497 Register length, |
| 1498 Heap::RootListIndex map_index, |
| 1499 Register scratch1, |
| 1500 Register scratch2) { |
| 1501 ASSERT(!AreAliased(string, length, scratch1, scratch2)); |
| 1502 LoadRoot(scratch2, map_index); |
| 1503 SmiTag(scratch1, length); |
| 1504 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 1505 |
| 1506 Mov(scratch2, String::kEmptyHashField); |
| 1507 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
| 1508 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); |
| 1509 } |
| 1510 |
| 1511 |
| 1512 int MacroAssembler::ActivationFrameAlignment() { |
| 1513 #if defined(V8_HOST_ARCH_A64) |
| 1514 // Running on the real platform. Use the alignment as mandated by the local |
| 1515 // environment. |
| 1516 // Note: This will break if we ever start generating snapshots on one ARM |
| 1517 // platform for another ARM platform with a different alignment. |
| 1518 return OS::ActivationFrameAlignment(); |
| 1519 #else // defined(V8_HOST_ARCH_A64)
| 1520 // If we are using the simulator then we should always align to the expected |
| 1521 // alignment. As the simulator is used to generate snapshots we do not know |
| 1522 // if the target platform will need alignment, so this is controlled from a |
| 1523 // flag. |
| 1524 return FLAG_sim_stack_alignment; |
| 1525 #endif // defined(V8_HOST_ARCH_A64) |
| 1526 } |
| 1527 |
| 1528 |
| 1529 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1530 int num_of_reg_args) { |
| 1531 CallCFunction(function, num_of_reg_args, 0); |
| 1532 } |
| 1533 |
| 1534 |
| 1535 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1536 int num_of_reg_args, |
| 1537 int num_of_double_args) { |
| 1538 Mov(Tmp0(), Operand(function)); |
| 1539 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args); |
| 1540 } |
| 1541 |
| 1542 |
| 1543 void MacroAssembler::CallCFunction(Register function, |
| 1544 int num_of_reg_args, |
| 1545 int num_of_double_args) { |
| 1546 ASSERT(has_frame()); |
| 1547 // We can pass 8 integer arguments in registers. If we need to pass more than |
| 1548 // that, we'll need to implement support for passing them on the stack. |
| 1549 ASSERT(num_of_reg_args <= 8); |
| 1550 |
| 1551 // If we're passing doubles, we're limited to the following prototypes |
| 1552 // (defined by ExternalReference::Type): |
| 1553 // BUILTIN_COMPARE_CALL: int f(double, double) |
| 1554 // BUILTIN_FP_FP_CALL: double f(double, double) |
| 1555 // BUILTIN_FP_CALL: double f(double) |
| 1556 // BUILTIN_FP_INT_CALL: double f(double, int) |
| 1557 if (num_of_double_args > 0) { |
| 1558 ASSERT(num_of_reg_args <= 1); |
| 1559 ASSERT((num_of_double_args + num_of_reg_args) <= 2); |
| 1560 } |
| 1561 |
| 1562 |
| 1563 // If the stack pointer is not csp, we need to derive an aligned csp from the |
| 1564 // current stack pointer. |
| 1565 const Register old_stack_pointer = StackPointer(); |
| 1566 if (!csp.Is(old_stack_pointer)) { |
| 1567 AssertStackConsistency(); |
| 1568 |
| 1569 int sp_alignment = ActivationFrameAlignment(); |
| 1570 // The ABI mandates at least 16-byte alignment. |
| 1571 ASSERT(sp_alignment >= 16); |
| 1572 ASSERT(IsPowerOf2(sp_alignment)); |
| 1573 |
| 1574 // The current stack pointer is a callee-saved register, and is preserved
| 1575 // across the call. |
| 1576 ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); |
| 1577 |
| 1578 // Align and synchronize the system stack pointer with jssp. |
| 1579 Bic(csp, old_stack_pointer, sp_alignment - 1); |
| 1580 SetStackPointer(csp); |
| 1581 } |
| 1582 |
| 1583 // Call directly. The function called cannot cause a GC, or allow preemption, |
| 1584 // so the return address in the link register stays correct. |
| 1585 Call(function); |
| 1586 |
| 1587 if (!csp.Is(old_stack_pointer)) { |
| 1588 if (emit_debug_code()) { |
| 1589 // Because the stack pointer must be aligned on a 16-byte boundary, the |
| 1590 // aligned csp can be up to 12 bytes below the jssp. This is the case |
| 1591 // where we only pushed one W register on top of an aligned jssp. |
| 1592 Register temp = Tmp1(); |
| 1593 ASSERT(ActivationFrameAlignment() == 16); |
| 1594 Sub(temp, csp, old_stack_pointer); |
| 1595 // We want temp <= 0 && temp >= -12. |
| 1596 Cmp(temp, 0); |
| 1597 Ccmp(temp, -12, NFlag, le); |
| 1598 Check(ge, "The stack was corrupted by MacroAssembler::Call()."); |
| 1599 } |
| 1600 SetStackPointer(old_stack_pointer); |
| 1601 } |
| 1602 } |
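|
| // A minimal host-side sketch of the alignment performed by the Bic above (the
| // helper name is illustrative and not part of V8; assumes <cstdint> for
| // uintptr_t): rounding a stack pointer down to a power-of-two alignment simply
| // clears its low bits, i.e. csp = old_stack_pointer & ~(sp_alignment - 1).
| static inline uintptr_t AlignStackPointerDown(uintptr_t sp,
|                                               uintptr_t alignment) {
|   // 'alignment' must be a non-zero power of two, as asserted above.
|   return sp & ~(alignment - 1);
| }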
| 1603 |
| 1604 |
| 1605 void MacroAssembler::Jump(Register target) { |
| 1606 Br(target); |
| 1607 } |
| 1608 |
| 1609 |
| 1610 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { |
| 1611 Mov(Tmp0(), Operand(target, rmode)); |
| 1612 Br(Tmp0()); |
| 1613 } |
| 1614 |
| 1615 |
| 1616 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { |
| 1617 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 1618 Jump(reinterpret_cast<intptr_t>(target), rmode); |
| 1619 } |
| 1620 |
| 1621 |
| 1622 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { |
| 1623 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 1624 AllowDeferredHandleDereference embedding_raw_address; |
| 1625 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); |
| 1626 } |
| 1627 |
| 1628 |
| 1629 void MacroAssembler::Call(Register target) { |
| 1630 BlockConstPoolScope scope(this); |
| 1631 #ifdef DEBUG |
| 1632 Label start_call; |
| 1633 Bind(&start_call); |
| 1634 #endif |
| 1635 |
| 1636 Blr(target); |
| 1637 |
| 1638 #ifdef DEBUG |
| 1639 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); |
| 1640 #endif |
| 1641 } |
| 1642 |
| 1643 |
| 1644 void MacroAssembler::Call(Label* target) { |
| 1645 BlockConstPoolScope scope(this); |
| 1646 #ifdef DEBUG |
| 1647 Label start_call; |
| 1648 Bind(&start_call); |
| 1649 #endif |
| 1650 |
| 1651 Bl(target); |
| 1652 |
| 1653 #ifdef DEBUG |
| 1654 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); |
| 1655 #endif |
| 1656 } |
| 1657 |
| 1658 |
| 1659 // MacroAssembler::CallSize is sensitive to changes in this function, as it
| 1660 // needs to know how many instructions are used to branch to the target.
| 1661 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { |
| 1662 BlockConstPoolScope scope(this); |
| 1663 #ifdef DEBUG |
| 1664 Label start_call; |
| 1665 Bind(&start_call); |
| 1666 #endif |
| 1667 // Statement positions are expected to be recorded when the target |
| 1668 // address is loaded. |
| 1669 positions_recorder()->WriteRecordedPositions(); |
| 1670 |
| 1671 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
| 1672 ASSERT(rmode != RelocInfo::NONE32); |
| 1673 |
| 1674 if (rmode == RelocInfo::NONE64) { |
| 1675 uint64_t imm = reinterpret_cast<uint64_t>(target); |
| 1676 movz(Tmp0(), (imm >> 0) & 0xffff, 0); |
| 1677 movk(Tmp0(), (imm >> 16) & 0xffff, 16); |
| 1678 movk(Tmp0(), (imm >> 32) & 0xffff, 32); |
| 1679 movk(Tmp0(), (imm >> 48) & 0xffff, 48); |
| 1680 } else { |
| 1681 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode)); |
| 1682 } |
| 1683 Blr(Tmp0()); |
| 1684 #ifdef DEBUG |
| 1685 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); |
| 1686 #endif |
| 1687 } |
| 1688 |
| 1689 |
| 1690 void MacroAssembler::Call(Handle<Code> code, |
| 1691 RelocInfo::Mode rmode, |
| 1692 TypeFeedbackId ast_id) { |
| 1693 #ifdef DEBUG |
| 1694 Label start_call; |
| 1695 Bind(&start_call); |
| 1696 #endif |
| 1697 |
| 1698 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) { |
| 1699 SetRecordedAstId(ast_id); |
| 1700 rmode = RelocInfo::CODE_TARGET_WITH_ID; |
| 1701 } |
| 1702 |
| 1703 AllowDeferredHandleDereference embedding_raw_address; |
| 1704 Call(reinterpret_cast<Address>(code.location()), rmode); |
| 1705 |
| 1706 #ifdef DEBUG |
| 1707 // Check the size of the code generated. |
| 1708 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id)); |
| 1709 #endif |
| 1710 } |
| 1711 |
| 1712 |
| 1713 int MacroAssembler::CallSize(Register target) { |
| 1714 USE(target); |
| 1715 return kInstructionSize; |
| 1716 } |
| 1717 |
| 1718 |
| 1719 int MacroAssembler::CallSize(Label* target) { |
| 1720 USE(target); |
| 1721 return kInstructionSize; |
| 1722 } |
| 1723 |
| 1724 |
| 1725 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { |
| 1726 USE(target); |
| 1727 |
| 1728 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
| 1729 ASSERT(rmode != RelocInfo::NONE32); |
| 1730 |
| 1731 if (rmode == RelocInfo::NONE64) { |
| 1732 return kCallSizeWithoutRelocation; |
| 1733 } else { |
| 1734 return kCallSizeWithRelocation; |
| 1735 } |
| 1736 } |
| 1737 |
| 1738 |
| 1739 int MacroAssembler::CallSize(Handle<Code> code, |
| 1740 RelocInfo::Mode rmode, |
| 1741 TypeFeedbackId ast_id) { |
| 1742 USE(code); |
| 1743 USE(ast_id); |
| 1744 |
| 1745 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
| 1746 ASSERT(rmode != RelocInfo::NONE32); |
| 1747 |
| 1748 if (rmode == RelocInfo::NONE64) { |
| 1749 return kCallSizeWithoutRelocation; |
| 1750 } else { |
| 1751 return kCallSizeWithRelocation; |
| 1752 } |
| 1753 } |
| 1754 |
| 1755 |
| 1756 |
| 1757 |
| 1758 |
| 1759 void MacroAssembler::JumpForHeapNumber(Register object, |
| 1760 Register heap_number_map, |
| 1761 Label* on_heap_number, |
| 1762 Label* on_not_heap_number) { |
| 1763 ASSERT(on_heap_number || on_not_heap_number); |
| 1764 // Tmp0() is used as a scratch register. |
| 1765 ASSERT(!AreAliased(Tmp0(), heap_number_map)); |
| 1766 AssertNotSmi(object); |
| 1767 |
| 1768 // Load the HeapNumber map if it is not passed. |
| 1769 if (heap_number_map.Is(NoReg)) { |
| 1770 heap_number_map = Tmp1(); |
| 1771 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1772 } else { |
| 1773 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map. |
| 1774 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1775 } |
| 1776 |
| 1777 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1778 Cmp(Tmp0(), heap_number_map); |
| 1779 |
| 1780 if (on_heap_number) { |
| 1781 B(eq, on_heap_number); |
| 1782 } |
| 1783 if (on_not_heap_number) { |
| 1784 B(ne, on_not_heap_number); |
| 1785 } |
| 1786 } |
| 1787 |
| 1788 |
| 1789 void MacroAssembler::JumpIfHeapNumber(Register object, |
| 1790 Label* on_heap_number, |
| 1791 Register heap_number_map) { |
| 1792 JumpForHeapNumber(object, |
| 1793 heap_number_map, |
| 1794 on_heap_number, |
| 1795 NULL); |
| 1796 } |
| 1797 |
| 1798 |
| 1799 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
| 1800 Label* on_not_heap_number, |
| 1801 Register heap_number_map) { |
| 1802 JumpForHeapNumber(object, |
| 1803 heap_number_map, |
| 1804 NULL, |
| 1805 on_not_heap_number); |
| 1806 } |
| 1807 |
| 1808 |
| 1809 void MacroAssembler::TryConvertDoubleToInt(Register as_int, |
| 1810 FPRegister value, |
| 1811 FPRegister scratch_d, |
| 1812 Label* on_successful_conversion, |
| 1813 Label* on_failed_conversion) { |
| 1814 // Convert to an int and back again, then compare with the original value. |
| 1815 Fcvtzs(as_int, value); |
| 1816 Scvtf(scratch_d, as_int); |
| 1817 Fcmp(value, scratch_d); |
| 1818 |
| 1819 if (on_successful_conversion) { |
| 1820 B(on_successful_conversion, eq); |
| 1821 } |
| 1822 if (on_failed_conversion) { |
| 1823 B(on_failed_conversion, ne); |
| 1824 } |
| 1825 } |
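|
| // Host-side sketch of the round-trip test above (name illustrative; assumes
| // the input is representable in an int64_t, whereas the generated Fcvtzs
| // saturates instead): the conversion succeeds exactly when converting the
| // integer back to double reproduces the original value, mirroring the
| // Fcvtzs, Scvtf and Fcmp sequence.
| static inline bool DoubleConvertsExactlyToInt64(double value, int64_t* as_int) {
|   *as_int = static_cast<int64_t>(value);         // Truncate towards zero.
|   return static_cast<double>(*as_int) == value;  // Exact round trip?
| }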
| 1826 |
| 1827 |
| 1828 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { |
| 1829 // Clamp the value to [0..255]. |
| 1830 Cmp(input.W(), Operand(input.W(), UXTB)); |
| 1831 // If input < input & 0xff, it must be < 0, so saturate to 0. |
| 1832 Csel(output.W(), wzr, input.W(), lt); |
| 1833 // Create a constant 0xff. |
| 1834 Mov(WTmp0(), 255); |
| 1835 // If input > input & 0xff, it must be > 255, so saturate to 255. |
| 1836 Csel(output.W(), WTmp0(), output.W(), gt); |
| 1837 } |
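|
| // Host-side reference for the clamp above (name illustrative). Comparing the
| // input with its own low byte classifies it in one go: negative inputs compare
| // below it, inputs above 255 compare above it, and in-range inputs compare
| // equal, which is what the Cmp/Csel sequence exploits.
| static inline uint8_t ClampInt32ToUint8Reference(int32_t input) {
|   if (input < 0) return 0;      // Saturate negative values to 0.
|   if (input > 255) return 255;  // Saturate large values to 255.
|   return static_cast<uint8_t>(input);
| }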
| 1838 |
| 1839 |
| 1840 void MacroAssembler::ClampInt32ToUint8(Register in_out) { |
| 1841 ClampInt32ToUint8(in_out, in_out); |
| 1842 } |
| 1843 |
| 1844 |
| 1845 void MacroAssembler::ClampDoubleToUint8(Register output, |
| 1846 DoubleRegister input, |
| 1847 DoubleRegister dbl_scratch) { |
| 1848 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types: |
| 1849 // - Inputs lower than 0 (including -infinity) produce 0. |
| 1850 // - Inputs higher than 255 (including +infinity) produce 255. |
| 1851 // Also, it seems that PIXEL types use round-to-nearest rather than |
| 1852 // round-towards-zero. |
| 1853 |
| 1854 // Clamp the input to 255 before the conversion. This also squashes +infinity,
| 1855 // which Fcvtnu would not convert to the required 255.
| 1856 Fmov(dbl_scratch, 255); |
| 1857 Fmin(dbl_scratch, dbl_scratch, input); |
| 1858 |
| 1859 // Convert double to unsigned integer. Values less than zero become zero. |
| 1860 // Values greater than 255 have already been clamped to 255. |
| 1861 Fcvtnu(output, dbl_scratch); |
| 1862 } |
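|
| // Host-side reference for the WebIDL "[Clamp]" behaviour described above (name
| // illustrative, assuming <cmath>). Ties round under the default
| // round-to-nearest-even mode, matching Fcvtnu's rounding.
| static inline uint8_t ClampDoubleToUint8Reference(double input) {
|   if (!(input > 0.0)) return 0;    // NaN, -infinity and values <= 0.
|   if (input >= 255.0) return 255;  // +infinity and values >= 255.
|   return static_cast<uint8_t>(std::nearbyint(input));  // Round to nearest.
| }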
| 1863 |
| 1864 |
| 1865 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, |
| 1866 Register src, |
| 1867 unsigned count, |
| 1868 Register scratch1, |
| 1869 Register scratch2, |
| 1870 Register scratch3) { |
| 1871 // Untag src and dst into scratch registers. |
| 1872 // Copy src->dst in a tight loop. |
| 1873 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1())); |
| 1874 ASSERT(count >= 2); |
| 1875 |
| 1876 const Register& remaining = scratch3; |
| 1877 Mov(remaining, count / 2); |
| 1878 |
| 1879 // Only use the Assembler, so we can use Tmp0() and Tmp1(). |
| 1880 InstructionAccurateScope scope(this); |
| 1881 |
| 1882 const Register& dst_untagged = scratch1; |
| 1883 const Register& src_untagged = scratch2; |
| 1884 sub(dst_untagged, dst, kHeapObjectTag); |
| 1885 sub(src_untagged, src, kHeapObjectTag); |
| 1886 |
| 1887 // Copy fields in pairs. |
| 1888 Label loop; |
| 1889 bind(&loop); |
| 1890 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, |
| 1891 PostIndex)); |
| 1892 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, |
| 1893 PostIndex)); |
| 1894 sub(remaining, remaining, 1); |
| 1895 cbnz(remaining, &loop); |
| 1896 |
| 1897 // Handle the leftovers. |
| 1898 if (count & 1) { |
| 1899 ldr(Tmp0(), MemOperand(src_untagged)); |
| 1900 str(Tmp0(), MemOperand(dst_untagged)); |
| 1901 } |
| 1902 } |
| 1903 |
| 1904 |
| 1905 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, |
| 1906 Register src, |
| 1907 unsigned count, |
| 1908 Register scratch1, |
| 1909 Register scratch2) { |
| 1910 // Untag src and dst into scratch registers. |
| 1911 // Copy src->dst in an unrolled loop. |
| 1912 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1())); |
| 1913 |
| 1914 // Only use the Assembler, so we can use Tmp0() and Tmp1(). |
| 1915 InstructionAccurateScope scope(this); |
| 1916 |
| 1917 const Register& dst_untagged = scratch1; |
| 1918 const Register& src_untagged = scratch2; |
| 1919 sub(dst_untagged, dst, kHeapObjectTag); |
| 1920 sub(src_untagged, src, kHeapObjectTag); |
| 1921 |
| 1922 // Copy fields in pairs. |
| 1923 for (unsigned i = 0; i < count / 2; i++) { |
| 1924 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, |
| 1925 PostIndex)); |
| 1926 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, |
| 1927 PostIndex)); |
| 1928 } |
| 1929 |
| 1930 // Handle the leftovers. |
| 1931 if (count & 1) { |
| 1932 ldr(Tmp0(), MemOperand(src_untagged)); |
| 1933 str(Tmp0(), MemOperand(dst_untagged)); |
| 1934 } |
| 1935 } |
| 1936 |
| 1937 |
| 1938 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, |
| 1939 Register src, |
| 1940 unsigned count, |
| 1941 Register scratch1) { |
| 1942 // Untag src and dst into scratch registers. |
| 1943 // Copy src->dst in an unrolled loop. |
| 1944 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1())); |
| 1945 |
| 1946 // Only use the Assembler, so we can use Tmp0() and Tmp1(). |
| 1947 InstructionAccurateScope scope(this); |
| 1948 |
| 1949 const Register& dst_untagged = scratch1; |
| 1950 const Register& src_untagged = Tmp1(); |
| 1951 sub(dst_untagged, dst, kHeapObjectTag); |
| 1952 sub(src_untagged, src, kHeapObjectTag); |
| 1953 |
| 1954 // Copy fields one by one. |
| 1955 for (unsigned i = 0; i < count; i++) { |
| 1956 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); |
| 1957 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); |
| 1958 } |
| 1959 } |
| 1960 |
| 1961 |
| 1962 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, |
| 1963 unsigned count) { |
| 1964 // One of two methods is used: |
| 1965 // |
| 1966 // For high 'count' values where many scratch registers are available: |
| 1967 // Untag src and dst into scratch registers. |
| 1968 // Copy src->dst in a tight loop. |
| 1969 // |
| 1970 // For low 'count' values or where few scratch registers are available: |
| 1971 // Untag src and dst into scratch registers. |
| 1972 // Copy src->dst in an unrolled loop. |
| 1973 // |
| 1974 // In both cases, fields are copied in pairs if possible, and left-overs are |
| 1975 // handled separately. |
| 1976 ASSERT(!temps.IncludesAliasOf(dst)); |
| 1977 ASSERT(!temps.IncludesAliasOf(src)); |
| 1978 ASSERT(!temps.IncludesAliasOf(Tmp0())); |
| 1979 ASSERT(!temps.IncludesAliasOf(Tmp1())); |
| 1980 ASSERT(!temps.IncludesAliasOf(xzr)); |
| 1981 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1())); |
| 1982 |
| 1983 if (emit_debug_code()) { |
| 1984 Cmp(dst, src); |
| 1985 Check(ne, "In CopyFields, the destination is the same as the source."); |
| 1986 } |
| 1987 |
| 1988 // The value of 'count' at which a loop will be generated (if there are |
| 1989 // enough scratch registers). |
| 1990 static const unsigned kLoopThreshold = 8; |
| 1991 |
| 1992 ASSERT(!temps.IsEmpty()); |
| 1993 Register scratch1 = Register(temps.PopLowestIndex()); |
| 1994 Register scratch2 = Register(temps.PopLowestIndex()); |
| 1995 Register scratch3 = Register(temps.PopLowestIndex()); |
| 1996 |
| 1997 if (scratch3.IsValid() && (count >= kLoopThreshold)) { |
| 1998 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3); |
| 1999 } else if (scratch2.IsValid()) { |
| 2000 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2); |
| 2001 } else if (scratch1.IsValid()) { |
| 2002 CopyFieldsUnrolledHelper(dst, src, count, scratch1); |
| 2003 } else { |
| 2004 UNREACHABLE(); |
| 2005 } |
| 2006 } |
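|
| // Host-side sketch of what the helpers above achieve (names illustrative):
| // copy 'count' pointer-sized fields in pairs, mirroring the ldp/stp copies,
| // and handle an odd leftover field separately.
| static void CopyFieldsReference(uint64_t* dst, const uint64_t* src,
|                                 unsigned count) {
|   unsigned i = 0;
|   for (; i + 1 < count; i += 2) {  // Copy fields in pairs.
|     dst[i] = src[i];
|     dst[i + 1] = src[i + 1];
|   }
|   if (count & 1) dst[i] = src[i];  // Handle the leftover field.
| }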
| 2007 |
| 2008 |
| 2009 void MacroAssembler::CopyBytes(Register dst, |
| 2010 Register src, |
| 2011 Register length, |
| 2012 Register scratch, |
| 2013 CopyHint hint) { |
| 2014 ASSERT(!AreAliased(src, dst, length, scratch)); |
| 2015 |
| 2016 // TODO(all): Implement a faster copy function, and use hint to determine |
| 2017 // which algorithm to use for copies. |
| 2018 if (emit_debug_code()) { |
| 2019 // Check copy length. |
| 2020 Cmp(length, 0); |
| 2021 Assert(ge, "Copy length < 0"); |
| 2022 |
| 2023 // Check src and dst buffers don't overlap. |
| 2024 Add(scratch, src, length); // Calculate end of src buffer. |
| 2025 Cmp(scratch, dst); |
| 2026 Add(scratch, dst, length); // Calculate end of dst buffer. |
| 2027 Ccmp(scratch, src, ZFlag, gt); |
| 2028 Assert(le, "CopyBytes src and dst buffers overlap"); |
| 2029 } |
| 2030 |
| 2031 Label loop, done; |
| 2032 Cbz(length, &done); |
| 2033 |
| 2034 Bind(&loop); |
| 2035 Sub(length, length, 1); |
| 2036 Ldrb(scratch, MemOperand(src, 1, PostIndex)); |
| 2037 Strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2038 Cbnz(length, &loop); |
| 2039 Bind(&done); |
| 2040 } |
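|
| // Sketch of the debug-mode overlap check above (name illustrative): the ranges
| // [src, src + length) and [dst, dst + length) overlap exactly when each range
| // starts before the other one ends, which is what the Cmp/Ccmp pair tests
| // before Assert(le).
| static inline bool ByteRangesOverlap(uintptr_t src, uintptr_t dst,
|                                      size_t length) {
|   return (src + length > dst) && (dst + length > src);
| }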
| 2041 |
| 2042 |
| 2043 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, |
| 2044 Register end_offset, |
| 2045 Register filler) { |
| 2046 Label loop, entry; |
| 2047 B(&entry); |
| 2048 Bind(&loop); |
| 2049 // TODO(all): consider using stp here. |
| 2050 Str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); |
| 2051 Bind(&entry); |
| 2052 Cmp(start_offset, end_offset); |
| 2053 B(lt, &loop); |
| 2054 } |
| 2055 |
| 2056 |
| 2057 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( |
| 2058 Register first, |
| 2059 Register second, |
| 2060 Register scratch1, |
| 2061 Register scratch2, |
| 2062 Label* failure, |
| 2063 SmiCheckType smi_check) { |
| 2064 |
| 2065 if (smi_check == DO_SMI_CHECK) { |
| 2066 JumpIfEitherSmi(first, second, failure); |
| 2067 } else if (emit_debug_code()) { |
| 2068 ASSERT(smi_check == DONT_DO_SMI_CHECK); |
| 2069 Label not_smi; |
| 2070 JumpIfEitherSmi(first, second, NULL, &not_smi);
| 2071 Abort("At least one input is a smi."); |
| 2072 Bind(&not_smi);
| 2073 } |
| 2074 |
| 2075 // Test that both first and second are sequential ASCII strings. |
| 2076 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
| 2077 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
| 2078 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
| 2079 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
| 2080 |
| 2081 JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1, |
| 2082 scratch2, |
| 2083 scratch1, |
| 2084 scratch2, |
| 2085 failure); |
| 2086 } |
| 2087 |
| 2088 |
| 2089 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii( |
| 2090 Register first, |
| 2091 Register second, |
| 2092 Register scratch1, |
| 2093 Register scratch2, |
| 2094 Label* failure) { |
| 2095 ASSERT(!AreAliased(scratch1, second)); |
| 2096 ASSERT(!AreAliased(scratch1, scratch2)); |
| 2097 static const int kFlatAsciiStringMask = |
| 2098 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
| 2099 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 2100 And(scratch1, first, kFlatAsciiStringMask); |
| 2101 And(scratch2, second, kFlatAsciiStringMask); |
| 2102 Cmp(scratch1, kFlatAsciiStringTag); |
| 2103 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); |
| 2104 B(ne, failure); |
| 2105 } |
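|
| // Host-side equivalent of the Cmp/Ccmp idiom above (name illustrative). Ccmp
| // only performs its comparison when the first Cmp set 'eq'; otherwise it
| // forces the flags to NoFlag (so 'ne' holds), letting a single B(ne, failure)
| // fire when either instance type fails the sequential ASCII test.
| static inline bool BothSequentialAscii(uint32_t type1, uint32_t type2,
|                                        uint32_t mask, uint32_t tag) {
|   return ((type1 & mask) == tag) && ((type2 & mask) == tag);
| }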
| 2106 |
| 2107 |
| 2108 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, |
| 2109 Register scratch, |
| 2110 Label* failure) { |
| 2111 static const int kFlatAsciiStringMask = |
| 2112 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
| 2113 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 2114 And(scratch, type, kFlatAsciiStringMask); |
| 2115 Cmp(scratch, kFlatAsciiStringTag); |
| 2116 B(ne, failure); |
| 2117 } |
| 2118 |
| 2119 |
| 2120 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 2121 Register first, |
| 2122 Register second, |
| 2123 Register scratch1, |
| 2124 Register scratch2, |
| 2125 Label* failure) { |
| 2126 ASSERT(!AreAliased(first, second, scratch1, scratch2)); |
| 2127 static const int kFlatAsciiStringMask = |
| 2128 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
| 2129 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 2130 And(scratch1, first, kFlatAsciiStringMask); |
| 2131 And(scratch2, second, kFlatAsciiStringMask); |
| 2132 Cmp(scratch1, kFlatAsciiStringTag); |
| 2133 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); |
| 2134 B(ne, failure); |
| 2135 } |
| 2136 |
| 2137 |
| 2138 void MacroAssembler::GenerateNumberUnaryOperation(Token::Value op, |
| 2139 InvokeFlag flag) { |
| 2140 Runtime::FunctionId fid; |
| 2141 switch (op) { |
| 2142 case Token::ADD: |
| 2143 // A unary add on a number (or SMI) does nothing. |
| 2144 if (flag == JUMP_FUNCTION) { |
| 2145 // Emulate a tail call. |
| 2146 Ret(); |
| 2147 } |
| 2148 return; |
| 2149 case Token::SUB: |
| 2150 fid = Runtime::kNumberUnaryMinus; |
| 2151 break; |
| 2152 case Token::BIT_NOT: |
| 2153 fid = Runtime::kNumberNot; |
| 2154 break; |
| 2155 default: |
| 2156 UNREACHABLE(); |
| 2157 fid = Runtime::kAbort; // Please the compiler. |
| 2158 } |
| 2159 if (flag == JUMP_FUNCTION) { |
| 2160 Push(x0); |
| 2161 TailCallRuntime(fid, 1, 1); |
| 2162 } else { |
| 2163 ASSERT(flag == CALL_FUNCTION); |
| 2164 Push(lr); |
| 2165 Push(x0); |
| 2166 CallRuntime(fid, 1); |
| 2167 Pop(lr); |
| 2168 } |
| 2169 } |
| 2170 |
| 2171 void MacroAssembler::GenerateNumberNumberBinaryOperation(Token::Value op, |
| 2172 InvokeFlag flag) { |
| 2173 Register left = x1; |
| 2174 Register right = x0; |
| 2175 |
| 2176 ASSERT(right.is(x0)); |
| 2177 USE(left); |
| 2178 USE(right); |
| 2179 Runtime::FunctionId fid; |
| 2180 |
| 2181 switch (op) { |
| 2182 case Token::ADD: |
| 2183 fid = Runtime::kNumberAdd; |
| 2184 break; |
| 2185 case Token::SUB: |
| 2186 fid = Runtime::kNumberSub; |
| 2187 break; |
| 2188 case Token::MUL: |
| 2189 fid = Runtime::kNumberMul; |
| 2190 break; |
| 2191 case Token::DIV: |
| 2192 fid = Runtime::kNumberDiv; |
| 2193 break; |
| 2194 case Token::MOD: |
| 2195 fid = Runtime::kNumberMod; |
| 2196 break; |
| 2197 case Token::BIT_OR: |
| 2198 fid = Runtime::kNumberOr; |
| 2199 break; |
| 2200 case Token::BIT_AND: |
| 2201 fid = Runtime::kNumberAnd; |
| 2202 break; |
| 2203 case Token::BIT_XOR: |
| 2204 fid = Runtime::kNumberXor; |
| 2205 break; |
| 2206 case Token::SAR: |
| 2207 fid = Runtime::kNumberSar; |
| 2208 break; |
| 2209 case Token::SHR: |
| 2210 fid = Runtime::kNumberShr; |
| 2211 break; |
| 2212 case Token::SHL: |
| 2213 fid = Runtime::kNumberShl; |
| 2214 break; |
| 2215 default: |
| 2216 UNREACHABLE(); |
| 2217 fid = Runtime::kAbort; // Please the compiler. |
| 2218 } |
| 2219 if (flag == JUMP_FUNCTION) { |
| 2220 Push(x1, x0); |
| 2221 TailCallRuntime(fid, 2, 1); |
| 2222 } else { |
| 2223 ASSERT(flag == CALL_FUNCTION); |
| 2224 Push(lr); |
| 2225 Push(x1, x0); |
| 2226 CallRuntime(fid, 2); |
| 2227 Pop(lr); |
| 2228 } |
| 2229 } |
| 2230 |
| 2231 |
| 2232 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 2233 const ParameterCount& actual, |
| 2234 Handle<Code> code_constant, |
| 2235 Register code_reg, |
| 2236 Label* done, |
| 2237 InvokeFlag flag, |
| 2238 bool* definitely_mismatches, |
| 2239 const CallWrapper& call_wrapper, |
| 2240 CallKind call_kind) { |
| 2241 bool definitely_matches = false; |
| 2242 *definitely_mismatches = false; |
| 2243 Label regular_invoke; |
| 2244 |
| 2245 // Check whether the expected and actual argument counts match. If not,
| 2246 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
| 2247 // x0: actual arguments count. |
| 2248 // x1: function (passed through to callee). |
| 2249 // x2: expected arguments count. |
| 2250 // x3: callee code entry. |
| 2251 |
| 2252 // The code below is made a lot easier because the calling code already sets |
| 2253 // up actual and expected registers according to the contract if values are |
| 2254 // passed in registers. |
| 2255 ASSERT(actual.is_immediate() || actual.reg().is(x0)); |
| 2256 ASSERT(expected.is_immediate() || expected.reg().is(x2)); |
| 2257 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); |
| 2258 |
| 2259 if (expected.is_immediate()) { |
| 2260 ASSERT(actual.is_immediate()); |
| 2261 if (expected.immediate() == actual.immediate()) { |
| 2262 definitely_matches = true; |
| 2263 |
| 2264 } else { |
| 2265 Mov(x0, actual.immediate()); |
| 2266 if (expected.immediate() == |
| 2267 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { |
| 2268 // Don't worry about adapting arguments for builtins that |
| 2269 // don't want that done. Skip the adaptation code by making it look
| 2270 // like we have a match between expected and actual number of |
| 2271 // arguments. |
| 2272 definitely_matches = true; |
| 2273 } else { |
| 2274 *definitely_mismatches = true; |
| 2275 // Set up x2 for the argument adaptor. |
| 2276 Mov(x2, expected.immediate()); |
| 2277 } |
| 2278 } |
| 2279 |
| 2280 } else { // expected is a register. |
| 2281 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) |
| 2282 : Operand(actual.reg()); |
| 2283 // If actual == expected perform a regular invocation. |
| 2284 Cmp(expected.reg(), actual_op); |
| 2285 B(eq, &regular_invoke);
| 2286 // Otherwise set up x0 for the argument adaptor. |
| 2287 Mov(x0, actual_op); |
| 2288 } |
| 2289 |
| 2290 // If the argument counts may mismatch, generate a call to the argument |
| 2291 // adaptor. |
| 2292 if (!definitely_matches) { |
| 2293 if (!code_constant.is_null()) { |
| 2294 Mov(x3, Operand(code_constant)); |
| 2295 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag); |
| 2296 } |
| 2297 |
| 2298 Handle<Code> adaptor = |
| 2299 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 2300 if (flag == CALL_FUNCTION) { |
| 2301 call_wrapper.BeforeCall(CallSize(adaptor)); |
| 2302 SetCallKind(x5, call_kind); |
| 2303 Call(adaptor); |
| 2304 call_wrapper.AfterCall(); |
| 2305 if (!*definitely_mismatches) { |
| 2306 // If the arg counts don't match, no extra code is emitted by |
| 2307 // MAsm::InvokeCode and we can just fall through. |
| 2308 B(done); |
| 2309 } |
| 2310 } else { |
| 2311 SetCallKind(x5, call_kind); |
| 2312 Jump(adaptor, RelocInfo::CODE_TARGET); |
| 2313 } |
| 2314 } |
| 2315 Bind(&regular_invoke);
| 2316 } |
| 2317 |
| 2318 |
| 2319 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { |
| 2320 // This macro takes the dst register to make the code more readable |
| 2321 // at the call sites. However, the dst register has to be x5 to |
| 2322 // follow the calling convention, which requires the call kind to be
| 2323 // in x5. |
| 2324 // |
| 2325 // For example Builtins::Generate_LazyCompile requires this. |
| 2326 ASSERT(dst.is(x5)); |
| 2327 if (call_kind == CALL_AS_FUNCTION) { |
| 2328 Mov(dst, Operand(Smi::FromInt(1))); |
| 2329 } else { |
| 2330 Mov(dst, Operand(Smi::FromInt(0))); |
| 2331 } |
| 2332 } |
| 2333 |
| 2334 |
| 2335 void MacroAssembler::InvokeCode(Register code, |
| 2336 const ParameterCount& expected, |
| 2337 const ParameterCount& actual, |
| 2338 InvokeFlag flag, |
| 2339 const CallWrapper& call_wrapper, |
| 2340 CallKind call_kind) { |
| 2341 // You can't call a function without a valid frame. |
| 2342 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 2343 |
| 2344 Label done; |
| 2345 |
| 2346 bool definitely_mismatches = false; |
| 2347 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, |
| 2348 &definitely_mismatches, call_wrapper, call_kind); |
| 2349 |
| 2350 // If we are certain that actual != expected, then we know InvokePrologue will |
| 2351 // have handled the call through the argument adaptor mechanism. |
| 2352 // The called function expects the call kind in x5. |
| 2353 if (!definitely_mismatches) { |
| 2354 if (flag == CALL_FUNCTION) { |
| 2355 call_wrapper.BeforeCall(CallSize(code)); |
| 2356 SetCallKind(x5, call_kind); |
| 2357 Call(code); |
| 2358 call_wrapper.AfterCall(); |
| 2359 } else { |
| 2360 ASSERT(flag == JUMP_FUNCTION); |
| 2361 SetCallKind(x5, call_kind); |
| 2362 Jump(code); |
| 2363 } |
| 2364 } |
| 2365 |
| 2366 // Continue here if InvokePrologue does handle the invocation due to |
| 2367 // mismatched parameter counts. |
| 2368 Bind(&done); |
| 2369 } |
| 2370 |
| 2371 |
| 2372 void MacroAssembler::InvokeCode(Handle<Code> code, |
| 2373 const ParameterCount& expected, |
| 2374 const ParameterCount& actual, |
| 2375 RelocInfo::Mode rmode, |
| 2376 InvokeFlag flag, |
| 2377 CallKind call_kind) { |
| 2378 // You can't call a function without a valid frame. |
| 2379 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 2380 |
| 2381 Label done; |
| 2382 bool definitely_mismatches = false; |
| 2383 InvokePrologue(expected, actual, code, NoReg, &done, flag, |
| 2384 &definitely_mismatches, NullCallWrapper(), call_kind); |
| 2385 |
| 2386 // The called function expects the call kind in x5. |
| 2387 SetCallKind(x5, call_kind); |
| 2388 if (flag == CALL_FUNCTION) { |
| 2389 Call(code, rmode); |
| 2390 } else { |
| 2391 Jump(code, rmode); |
| 2392 } |
| 2393 |
| 2394 // Continue here if InvokePrologue does handle the invocation due to |
| 2395 // mismatched parameter counts. |
| 2396 Bind(&done); |
| 2397 } |
| 2398 |
| 2399 |
| 2400 void MacroAssembler::InvokeFunction(Register function, |
| 2401 const ParameterCount& actual, |
| 2402 InvokeFlag flag, |
| 2403 const CallWrapper& call_wrapper, |
| 2404 CallKind call_kind) { |
| 2405 // You can't call a function without a valid frame. |
| 2406 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 2407 |
| 2408 // Contract with called JS functions requires that function is passed in x1. |
| 2409 // (See FullCodeGenerator::Generate().) |
| 2410 ASSERT(function.is(x1)); |
| 2411 |
| 2412 Register expected_reg = x2; |
| 2413 Register code_reg = x3; |
| 2414 |
| 2415 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 2416 // The number of arguments is stored as an int32_t, and -1 is a marker |
| 2417 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign |
| 2418 // extension to correctly handle it. |
| 2419 Ldr(expected_reg, FieldMemOperand(function, |
| 2420 JSFunction::kSharedFunctionInfoOffset)); |
| 2421 Ldrsw(expected_reg, |
| 2422 FieldMemOperand(expected_reg, |
| 2423 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 2424 Ldr(code_reg, |
| 2425 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
| 2426 |
| 2427 ParameterCount expected(expected_reg); |
| 2428 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); |
| 2429 } |
| 2430 |
| 2431 |
| 2432 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
| 2433 const ParameterCount& expected, |
| 2434 const ParameterCount& actual, |
| 2435 InvokeFlag flag, |
| 2436 const CallWrapper& call_wrapper, |
| 2437 CallKind call_kind, |
| 2438 Register function_reg) { |
| 2439 // You can't call a function without a valid frame. |
| 2440 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 2441 |
| 2442 // Load the function object, if it isn't already loaded. |
| 2443 ASSERT(function_reg.Is(x1) || function_reg.IsNone()); |
| 2444 if (function_reg.IsNone()) { |
| 2445 function_reg = x1; |
| 2446 LoadHeapObject(function_reg, function); |
| 2447 } |
| 2448 |
| 2449 Register code_reg = x3; |
| 2450 |
| 2451 // Set up the context. |
| 2452 Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); |
| 2453 |
| 2454 // We call indirectly through the code field in the function to |
| 2455 // allow recompilation to take effect without changing any of the |
| 2456 // call sites. |
| 2457 Ldr(code_reg, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); |
| 2458 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); |
| 2459 } |
| 2460 |
| 2461 |
| 2462 void MacroAssembler::ECMA262ToInt32(Register result, |
| 2463 DoubleRegister input, |
| 2464 Register scratch1, |
| 2465 Register scratch2, |
| 2466 ECMA262ToInt32Result format) { |
| 2467 ASSERT(!AreAliased(result, scratch1, scratch2)); |
| 2468 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); |
| 2469 STATIC_ASSERT(kSmiTag == 0); |
| 2470 STATIC_ASSERT(kSmiValueSize == 32); |
| 2471 |
| 2472 Label done, tag, manual_conversion; |
| 2473 |
| 2474 // 1. Try to convert with an FPU convert instruction. The modulo operation is
| 2475 // trivial to compute on an integer register, so we convert to a 64-bit
| 2476 // integer first and then derive the 32-bit result from that.
| 2477 // |
| 2478 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
| 2479 // when the double is out of range, including for infinities. NaNs are
| 2480 // converted to 0 (as ECMA-262 requires); saturated results are fixed up below.
| 2481 Fcvtzs(result, input); |
| 2482 |
| 2483 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not |
| 2484 // representable using a double, so if the result is one of those then we know |
| 2485 // that saturation occurred, and we need to handle the conversion manually.
| 2486 // |
| 2487 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting |
| 2488 // 1 will cause signed overflow. |
| 2489 Cmp(result, 1); |
| 2490 Ccmp(result, -1, VFlag, vc); |
| 2491 B(vc, &tag); |
| 2492 |
| 2493 // 2. Manually convert the input to an int32. |
| 2494 Fmov(result, input); |
| 2495 |
| 2496 // Extract the exponent. |
| 2497 Register exponent = scratch1; |
| 2498 Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits); |
| 2499 |
| 2500 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
| 2501 // the mantissa gets shifted completely out of the int32_t result. |
| 2502 Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32); |
| 2503 CzeroX(result, ge); |
| 2504 B(ge, &done); |
| 2505 |
| 2506 // The Fcvtzs sequence handles all cases except where the conversion causes |
| 2507 // signed overflow in the int64_t target. Since we've already handled |
| 2508 // exponents >= 84, we can guarantee that 63 <= exponent < 84. |
| 2509 |
| 2510 if (emit_debug_code()) { |
| 2511 Cmp(exponent, HeapNumber::kExponentBias + 63); |
| 2512 Check(ge, "This input should have been handled by the FPU."); |
| 2513 } |
| 2514 |
| 2515 // Isolate the mantissa bits, and set the implicit '1'. |
| 2516 Register mantissa = scratch2; |
| 2517 Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits); |
| 2518 Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits); |
| 2519 |
| 2520 // Negate the mantissa if necessary. |
| 2521 Tst(result, kXSignMask); |
| 2522 Cneg(mantissa, mantissa, ne); |
| 2523 |
| 2524 // Shift the mantissa bits into the correct place. We know that we have to shift
| 2525 // it left here, because exponent >= 63 >= kMantissaBits. |
| 2526 Sub(exponent, exponent, |
| 2527 HeapNumber::kExponentBias + HeapNumber::kMantissaBits); |
| 2528 Lsl(result, mantissa, exponent); |
| 2529 |
| 2530 Bind(&tag); |
| 2531 switch (format) { |
| 2532 case INT32_IN_W: |
| 2533 // There is nothing to do; the upper 32 bits are undefined. |
| 2534 if (emit_debug_code()) { |
| 2535 __ Mov(scratch1, 0x55555555); |
| 2536 __ Bfi(result, scratch1, 32, 32); |
| 2537 } |
| 2538 break; |
| 2539 case INT32_IN_X: |
| 2540 Sxtw(result, result); |
| 2541 break; |
| 2542 case SMI: |
| 2543 SmiTag(result); |
| 2544 break; |
| 2545 } |
| 2546 |
| 2547 Bind(&done); |
| 2548 } |
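|
| // Portable reference for the ECMA-262 ToInt32 semantics implemented above
| // (name illustrative, assuming <cmath> and <cstdint>). The macro reaches the
| // same result with Fcvtzs plus the manual exponent/mantissa path; this sketch
| // models the specified behaviour, not the generated instruction sequence.
| static inline int32_t Ecma262ToInt32Reference(double value) {
|   if (!std::isfinite(value)) return 0;    // NaN and infinities map to +0.
|   double truncated = std::trunc(value);   // Round towards zero.
|   double modulo = std::fmod(truncated, 4294967296.0);  // Exact, keeps sign.
|   if (modulo < 0) modulo += 4294967296.0; // Map into [0, 2^32).
|   // Reinterpret the low 32 bits as a signed value; results >= 2^31 wrap.
|   return static_cast<int32_t>(static_cast<uint32_t>(modulo));
| }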
| 2549 |
| 2550 |
| 2551 void MacroAssembler::HeapNumberECMA262ToInt32(Register result, |
| 2552 Register heap_number, |
| 2553 Register scratch1, |
| 2554 Register scratch2, |
| 2555 DoubleRegister double_scratch, |
| 2556 ECMA262ToInt32Result format) { |
| 2557 if (emit_debug_code()) { |
| 2558 // Verify we indeed have a HeapNumber. |
| 2559 Label ok; |
| 2560 JumpIfHeapNumber(heap_number, &ok); |
| 2561 Abort("A HeapNumber is expected as input."); |
| 2562 Bind(&ok); |
| 2563 } |
| 2564 |
| 2565 Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset)); |
| 2566 ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format); |
| 2567 } |
| 2568 |
| 2569 |
| 2570 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 2571 ASSERT(jssp.Is(StackPointer())); |
| 2572 Push(lr, fp, cp); |
| 2573 Mov(Tmp1(), Operand(Smi::FromInt(type))); |
| 2574 Mov(Tmp0(), Operand(CodeObject())); |
| 2575 Push(Tmp1(), Tmp0()); |
| 2576 // jssp[4] : lr |
| 2577 // jssp[3] : fp |
| 2578 // jssp[2] : cp |
| 2579 // jssp[1] : type |
| 2580 // jssp[0] : code object |
| 2581 |
| 2582 // Adjust FP to point to saved FP. |
| 2583 add(fp, jssp, 3 * kXRegSizeInBytes); |
| 2584 } |
| 2585 |
| 2586 |
| 2587 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 2588 ASSERT(jssp.Is(StackPointer())); |
| 2589 // Drop the execution stack down to the frame pointer and restore |
| 2590 // the caller frame pointer and return address. |
| 2591 Mov(jssp, fp); |
| 2592 AssertStackConsistency(); |
| 2593 Pop(fp, lr); |
| 2594 } |
| 2595 |
| 2596 |
| 2597 // TODO(jbramley): Check that we're handling FP correctly [GOOGJSE-33]. |
| 2598 void MacroAssembler::EnterExitFrame(bool save_doubles, |
| 2599 const Register& scratch, |
| 2600 int extra_space) { |
| 2601 ASSERT(jssp.Is(StackPointer())); |
| 2602 |
| 2603 // Set up the new stack frame. |
| 2604 Mov(scratch, Operand(CodeObject())); |
| 2605 Push(lr, fp); |
| 2606 Mov(fp, StackPointer()); |
| 2607 Push(xzr, scratch); |
| 2608 // fp[8]: CallerPC (lr) |
| 2609 // fp -> fp[0]: CallerFP (old fp) |
| 2610 // fp[-8]: Space reserved for SPOffset. |
| 2611 // jssp -> fp[-16]: CodeObject() |
| 2612 STATIC_ASSERT((2 * kPointerSize) == |
| 2613 ExitFrameConstants::kCallerSPDisplacement); |
| 2614 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset); |
| 2615 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset); |
| 2616 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset); |
| 2617 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset); |
| 2618 |
| 2619 // Save the frame pointer and context pointer in the top frame. |
| 2620 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
| 2621 isolate()))); |
| 2622 Str(fp, MemOperand(scratch)); |
| 2623 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, |
| 2624 isolate()))); |
| 2625 Str(cp, MemOperand(scratch)); |
| 2626 |
| 2627 if (save_doubles) { |
| 2628 // TODO(jbramley): Implement kSaveFPRegs. It is only used by Lithium. |
| 2629 TODO_UNIMPLEMENTED("EnterExitFrame: save_doubles"); |
| 2630 } |
| 2631 |
| 2632 // Reserve space for the return address and for user-requested memory.
| 2633 // We do this before aligning so that we end up correctly aligned with a
| 2634 // minimum of wasted space.
| 2635 Claim(extra_space + 1, kXRegSizeInBytes); |
| 2636 // fp[8]: CallerPC (lr) |
| 2637 // fp -> fp[0]: CallerFP (old fp) |
| 2638 // fp[-8]: Space reserved for SPOffset. |
| 2639 // fp[-16]: CodeObject() |
| 2640 // jssp[8 + extra_space * 8]: Saved doubles (if save_doubles is true). |
| 2641 // jssp[8]: Extra space reserved for caller (if extra_space != 0). |
| 2642 // jssp -> jssp[0]: Space reserved for the return address. |
| 2643 STATIC_ASSERT((-3 * kPointerSize) == |
| 2644 ExitFrameConstants::kCallerSavedRegsOffset); |
| 2645 |
| 2646 // Align and synchronize the system stack pointer with jssp. |
| 2647 AlignAndSetCSPForFrame(); |
| 2648 ASSERT(csp.Is(StackPointer())); |
| 2649 |
| 2650 // fp[8]: CallerPC (lr) |
| 2651 // fp -> fp[0]: CallerFP (old fp) |
| 2652 // fp[-8]: Space reserved for SPOffset. |
| 2653 // fp[-16]: CodeObject() |
| 2654 // csp[...]: Saved doubles, if saved_doubles is true. |
| 2655 // csp[8]: Memory reserved for the caller if extra_space != 0. |
| 2656 // Alignment padding, if necessary. |
| 2657 // csp -> csp[0]: Space reserved for the return address. |
| 2658 |
| 2659 // ExitFrame::GetStateForFramePointer expects to find the return address at |
| 2660 // the memory address immediately below the pointer stored in SPOffset. |
| 2661 // It is not safe to derive much else from SPOffset, because the size of the |
| 2662 // padding can vary. |
| 2663 Add(scratch, csp, kXRegSizeInBytes); |
| 2664 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 2665 } |
| 2666 |
| 2667 |
| 2668 // Leave the current exit frame. |
| 2669 void MacroAssembler::LeaveExitFrame(bool restore_doubles, |
| 2670 const Register& scratch) { |
| 2671 ASSERT(csp.Is(StackPointer())); |
| 2672 |
| 2673 if (restore_doubles) { |
| 2674 // TODO(jbramley): Implement kSaveFPRegs. It is only used by Lithium. |
| 2675 TODO_UNIMPLEMENTED("LeaveExitFrame: restore_doubles"); |
| 2676 } |
| 2677 |
| 2678 // Restore the context pointer from the top frame. |
| 2679 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, |
| 2680 isolate()))); |
| 2681 Ldr(cp, MemOperand(scratch)); |
| 2682 if (emit_debug_code()) { |
| 2683 // Also emit debug code to clear the cp in the top frame. |
| 2684 Str(xzr, MemOperand(scratch)); |
| 2685 } |
| 2686 // Clear the frame pointer from the top frame. |
| 2687 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
| 2688 isolate()))); |
| 2689 Str(xzr, MemOperand(scratch)); |
| 2690 |
| 2691 // Pop the exit frame. |
| 2692 // fp[8]: CallerPC (lr) |
| 2693 // fp -> fp[0]: CallerFP (old fp) |
| 2694 // fp[...]: The rest of the frame. |
| 2695 Mov(jssp, fp); |
| 2696 SetStackPointer(jssp); |
| 2697 AssertStackConsistency(); |
| 2698 Pop(fp, lr); |
| 2699 } |
| 2700 |
| 2701 |
| 2702 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
| 2703 Register scratch1, Register scratch2) { |
| 2704 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2705 Mov(scratch1, value); |
| 2706 Mov(scratch2, Operand(ExternalReference(counter))); |
| 2707 Str(scratch1, MemOperand(scratch2)); |
| 2708 } |
| 2709 } |
| 2710 |
| 2711 |
| 2712 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
| 2713 Register scratch1, Register scratch2) { |
| 2714 ASSERT(value != 0); |
| 2715 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2716 Mov(scratch2, Operand(ExternalReference(counter))); |
| 2717 Ldr(scratch1, MemOperand(scratch2)); |
| 2718 Add(scratch1, scratch1, value); |
| 2719 Str(scratch1, MemOperand(scratch2)); |
| 2720 } |
| 2721 } |
| 2722 |
| 2723 |
| 2724 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
| 2725 Register scratch1, Register scratch2) { |
| 2726 IncrementCounter(counter, -value, scratch1, scratch2); |
| 2727 } |
| 2728 |
| 2729 |
| 2730 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
| 2731 if (context_chain_length > 0) { |
| 2732 // Move up the chain of contexts to the context containing the slot. |
| 2733 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 2734 for (int i = 1; i < context_chain_length; i++) { |
| 2735 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 2736 } |
| 2737 } else { |
| 2738 // Slot is in the current function context. Move it into the |
| 2739 // destination register in case we store into it (the write barrier |
| 2740 // cannot be allowed to destroy the context in cp). |
| 2741 Mov(dst, cp); |
| 2742 } |
| 2743 } |
| 2744 |
| 2745 |
| 2746 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 2747 void MacroAssembler::DebugBreak() { |
| 2748 Mov(x0, 0); |
| 2749 Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
| 2750 CEntryStub ces(1); |
| 2751 ASSERT(AllowThisStubCall(&ces)); |
| 2752 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); |
| 2753 } |
| 2754 #endif |
| 2755 |
| 2756 |
| 2757 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
| 2758 int handler_index) { |
| 2759 ASSERT(jssp.Is(StackPointer())); |
| 2760 // Adjust this code if the asserts don't hold. |
| 2761 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 2762 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 2763 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
| 2764 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
| 2765 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
| 2766 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 2767 |
| 2768 // For the JSEntry handler, we must preserve the live registers x0-x4. |
| 2769 // (See JSEntryStub::GenerateBody().) |
| 2770 |
| 2771 unsigned state = |
| 2772 StackHandler::IndexField::encode(handler_index) | |
| 2773 StackHandler::KindField::encode(kind); |
| 2774 |
| 2775 // Set up the code object and the state for pushing. |
| 2776 Mov(x10, Operand(CodeObject())); |
| 2777 Mov(x11, state); |
| 2778 |
| 2779 // Push the frame pointer, context, state, and code object. |
| 2780 if (kind == StackHandler::JS_ENTRY) { |
| 2781 ASSERT(Smi::FromInt(0) == 0); |
| 2782 Push(xzr, xzr, x11, x10); |
| 2783 } else { |
| 2784 Push(fp, cp, x11, x10); |
| 2785 } |
| 2786 |
| 2787 // Link the current handler as the next handler. |
| 2788 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 2789 Ldr(x10, MemOperand(x11)); |
| 2790 Push(x10); |
| 2791 // Set this new handler as the current one. |
| 2792 Str(jssp, MemOperand(x11)); |
| 2793 } |
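|
| // Illustrative view of the record pushed above (the struct itself is not part
| // of V8); the field order follows the StackHandlerConstants offsets asserted
| // at the top of the function, from the lowest address upwards.
| struct StackHandlerLayout {
|   uintptr_t next;     // kNextOffset:    link to the previous handler.
|   uintptr_t code;     // kCodeOffset:    CodeObject().
|   uintptr_t state;    // kStateOffset:   index and kind bit fields.
|   uintptr_t context;  // kContextOffset: cp, or 0 for JS_ENTRY handlers.
|   uintptr_t fp;       // kFPOffset:      fp, or 0 for JS_ENTRY handlers.
| };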
| 2794 |
| 2795 |
| 2796 void MacroAssembler::PopTryHandler() { |
| 2797 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 2798 Pop(x10); |
| 2799 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 2800 Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes); |
| 2801 Str(x10, MemOperand(x11)); |
| 2802 } |
| 2803 |
| 2804 |
| 2805 void MacroAssembler::Allocate(int object_size, |
| 2806 Register result, |
| 2807 Register scratch1, |
| 2808 Register scratch2, |
| 2809 Label* gc_required, |
| 2810 AllocationFlags flags) { |
| 2811 if (!FLAG_inline_new) { |
| 2812 if (emit_debug_code()) { |
| 2813 // Trash the registers to simulate an allocation failure. |
| 2814 // We apply salt to the original zap value to easily spot the values. |
| 2815 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 2816 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 2817 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 2818 } |
| 2819 B(gc_required); |
| 2820 return; |
| 2821 } |
| 2822 |
| 2823 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1())); |
| 2824 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() && |
| 2825 Tmp0().Is64Bits() && Tmp1().Is64Bits()); |
| 2826 |
| 2827 // Make object size into bytes. |
| 2828 if ((flags & SIZE_IN_WORDS) != 0) { |
| 2829 object_size *= kPointerSize; |
| 2830 } |
| 2831 ASSERT(0 == (object_size & kObjectAlignmentMask)); |
| 2832 |
| 2833 // Check relative positions of allocation top and limit addresses. |
| 2834 // The values must be adjacent in memory to allow the use of LDP. |
| 2835 ExternalReference heap_allocation_top = |
| 2836 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 2837 ExternalReference heap_allocation_limit = |
| 2838 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 2839 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 2840 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 2841 ASSERT((limit - top) == kPointerSize); |
| 2842 |
| 2843 // Set up the allocation top address and the allocation limit registers.
| 2844 Register top_address = scratch1; |
| 2845 Register allocation_limit = scratch2; |
| 2846 Mov(top_address, Operand(heap_allocation_top)); |
| 2847 |
| 2848 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 2849 // Load allocation top into result and the allocation limit. |
| 2850 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 2851 } else { |
| 2852 if (emit_debug_code()) { |
| 2853 // Assert that result actually contains top on entry. |
| 2854 Ldr(Tmp0(), MemOperand(top_address)); |
| 2855 Cmp(result, Tmp0()); |
| 2856 Check(eq, "Unexpected allocation top."); |
| 2857 } |
| 2858 // Load the allocation limit. 'result' already contains the allocation top. |
| 2859 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 2860 } |
| 2861 |
| 2862 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 2863 // the same alignment on A64. |
| 2864 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 2865 |
| 2866 // Calculate new top and bail out if new space is exhausted. |
| 2867 Adds(Tmp1(), result, object_size); |
| 2868 B(vs, gc_required); |
| 2869 Cmp(Tmp1(), allocation_limit); |
| 2870 B(hi, gc_required); |
| 2871 Str(Tmp1(), MemOperand(top_address)); |
| 2872 |
| 2873 // Tag the object if requested. |
| 2874 if ((flags & TAG_OBJECT) != 0) { |
| 2875 Orr(result, result, kHeapObjectTag); |
| 2876 } |
| 2877 } |
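|
| // Host-side sketch of the bump-pointer fast path above (names illustrative):
| // bump the allocation top by object_size and fail if the addition overflows
| // or the new top passes the limit, mirroring Adds / B(vs) / Cmp / B(hi).
| static inline bool BumpPointerAllocate(uintptr_t* top, uintptr_t limit,
|                                        size_t object_size, uintptr_t* result) {
|   uintptr_t new_top = *top + object_size;
|   if (new_top < *top) return false;   // Addition overflowed (the B(vs) case).
|   if (new_top > limit) return false;  // New space exhausted (the B(hi) case).
|   *result = *top;                     // The new object starts at the old top.
|   *top = new_top;
|   return true;
| }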
| 2878 |
| 2879 |
| 2880 void MacroAssembler::Allocate(Register object_size, |
| 2881 Register result, |
| 2882 Register scratch1, |
| 2883 Register scratch2, |
| 2884 Label* gc_required, |
| 2885 AllocationFlags flags) { |
| 2886 if (!FLAG_inline_new) { |
| 2887 if (emit_debug_code()) { |
| 2888 // Trash the registers to simulate an allocation failure. |
| 2889 // We apply salt to the original zap value to easily spot the values. |
| 2890 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 2891 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 2892 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 2893 } |
| 2894 B(gc_required); |
| 2895 return; |
| 2896 } |
| 2897 |
| 2898 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1())); |
| 2899 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && |
| 2900 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits()); |
| 2901 |
| 2902 // Check relative positions of allocation top and limit addresses. |
| 2903 // The values must be adjacent in memory to allow the use of LDP. |
| 2904 ExternalReference heap_allocation_top = |
| 2905 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 2906 ExternalReference heap_allocation_limit = |
| 2907 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 2908 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 2909 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 2910 ASSERT((limit - top) == kPointerSize); |
| 2911 |
| 2912 // Set up the allocation top address and the allocation limit registers.
| 2913 Register top_address = scratch1; |
| 2914 Register allocation_limit = scratch2; |
| 2915 Mov(top_address, Operand(heap_allocation_top)); |
| 2916 |
| 2917 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 2918 // Load allocation top into result and the allocation limit. |
| 2919 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 2920 } else { |
| 2921 if (emit_debug_code()) { |
| 2922 // Assert that result actually contains top on entry. |
| 2923 Ldr(Tmp0(), MemOperand(top_address)); |
| 2924 Cmp(result, Tmp0()); |
| 2925 Check(eq, "Unexpected allocation top."); |
| 2926 } |
| 2927 // Load the allocation limit. 'result' already contains the allocation top. |
| 2928 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 2929 } |
| 2930 |
| 2931 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 2932 // the same alignment on A64. |
| 2933 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 2934 |
| 2935 // Calculate new top and bail out if new space is exhausted |
| 2936 if ((flags & SIZE_IN_WORDS) != 0) { |
| 2937 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2)); |
| 2938 } else { |
| 2939 Adds(Tmp1(), result, object_size); |
| 2940 } |
| 2941 |
| 2942 if (emit_debug_code()) { |
| 2943 Tst(Tmp1(), kObjectAlignmentMask); |
| 2944 Check(eq, "Unaligned allocation in new space"); |
| 2945 } |
| 2946 |
| 2947 B(vs, gc_required); |
| 2948 Cmp(Tmp1(), allocation_limit); |
| 2949 B(hi, gc_required); |
| 2950 Str(Tmp1(), MemOperand(top_address)); |
| 2951 |
| 2952 // Tag the object if requested. |
| 2953 if ((flags & TAG_OBJECT) != 0) { |
| 2954 Orr(result, result, kHeapObjectTag); |
| 2955 } |
| 2956 } |
| 2957 |
| 2958 |
| 2959 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
| 2960 Register scratch) { |
| 2961 ExternalReference new_space_allocation_top = |
| 2962 ExternalReference::new_space_allocation_top_address(isolate()); |
| 2963 |
| 2964 // Make sure the object has no tag before resetting top. |
| 2965 Bic(object, object, kHeapObjectTagMask); |
| 2966 #ifdef DEBUG |
| 2967 // Check that the object un-allocated is below the current top. |
| 2968 Mov(scratch, Operand(new_space_allocation_top)); |
| 2969 Ldr(scratch, MemOperand(scratch)); |
| 2970 Cmp(object, scratch); |
| 2971 Check(lt, "Trying to undo allocation of non allocated memory."); |
| 2972 #endif |
| 2973 // Write the address of the object to un-allocate as the current top. |
| 2974 Mov(scratch, Operand(new_space_allocation_top)); |
| 2975 Str(object, MemOperand(scratch)); |
| 2976 } |
| 2977 |
| 2978 |
| 2979 void MacroAssembler::AllocateTwoByteString(Register result, |
| 2980 Register length, |
| 2981 Register scratch1, |
| 2982 Register scratch2, |
| 2983 Register scratch3, |
| 2984 Label* gc_required) { |
| 2985 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); |
| 2986 // Calculate the number of bytes needed for the characters in the string while |
| 2987 // observing object alignment. |
| 2988 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 2989 Add(scratch1, length, length); // Length in bytes, not chars. |
| 2990 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); |
| 2991 Bic(scratch1, scratch1, kObjectAlignmentMask); |
| 2992 |
| 2993 // Allocate two-byte string in new space. |
| 2994 Allocate(scratch1, |
| 2995 result, |
| 2996 scratch2, |
| 2997 scratch3, |
| 2998 gc_required, |
| 2999 TAG_OBJECT); |
| 3000 |
| 3001 // Set the map, length and hash field. |
| 3002 InitializeNewString(result, |
| 3003 length, |
| 3004 Heap::kStringMapRootIndex, |
| 3005 scratch1, |
| 3006 scratch2); |
| 3007 } |
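| 
| // Usage sketch (comment only; the register assignments are assumptions). A
| // typical call site allocates a sequential two-byte string for an untagged
| // character count and bails out to a runtime path on allocation failure:
| //
| //   Label runtime;
| //   // x1 holds the number of characters (untagged).
| //   AllocateTwoByteString(x0, x1, x10, x11, x12, &runtime);
| //   // x0 now holds the tagged string, with map, length and hash field
| //   // initialized.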
| 3008 |
| 3009 |
| 3010 void MacroAssembler::AllocateAsciiString(Register result, |
| 3011 Register length, |
| 3012 Register scratch1, |
| 3013 Register scratch2, |
| 3014 Register scratch3, |
| 3015 Label* gc_required) { |
| 3016 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); |
| 3017 // Calculate the number of bytes needed for the characters in the string while |
| 3018 // observing object alignment. |
| 3019 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 3020 STATIC_ASSERT(kCharSize == 1); |
| 3021 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); |
| 3022 Bic(scratch1, scratch1, kObjectAlignmentMask); |
| 3023 |
| 3024 // Allocate ASCII string in new space. |
| 3025 Allocate(scratch1, |
| 3026 result, |
| 3027 scratch2, |
| 3028 scratch3, |
| 3029 gc_required, |
| 3030 TAG_OBJECT); |
| 3031 |
| 3032 // Set the map, length and hash field. |
| 3033 InitializeNewString(result, |
| 3034 length, |
| 3035 Heap::kAsciiStringMapRootIndex, |
| 3036 scratch1, |
| 3037 scratch2); |
| 3038 } |
| 3039 |
| 3040 |
| 3041 void MacroAssembler::AllocateTwoByteConsString(Register result, |
| 3042 Register length, |
| 3043 Register scratch1, |
| 3044 Register scratch2, |
| 3045 Label* gc_required) { |
| 3046 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, |
| 3047 TAG_OBJECT); |
| 3048 |
| 3049 InitializeNewString(result, |
| 3050 length, |
| 3051 Heap::kConsStringMapRootIndex, |
| 3052 scratch1, |
| 3053 scratch2); |
| 3054 } |
| 3055 |
| 3056 |
| 3057 void MacroAssembler::AllocateAsciiConsString(Register result, |
| 3058 Register length, |
| 3059 Register scratch1, |
| 3060 Register scratch2, |
| 3061 Label* gc_required) { |
| 3062 Label allocate_new_space, install_map; |
| 3063 AllocationFlags flags = TAG_OBJECT; |
| 3064 |
| 3065 ExternalReference high_promotion_mode = ExternalReference:: |
| 3066 new_space_high_promotion_mode_active_address(isolate()); |
| 3067 Mov(scratch1, Operand(high_promotion_mode)); |
| 3068 Ldr(scratch1, MemOperand(scratch1)); |
| 3069 Cbz(scratch1, &allocate_new_space); |
| 3070 |
| 3071 Allocate(ConsString::kSize, |
| 3072 result, |
| 3073 scratch1, |
| 3074 scratch2, |
| 3075 gc_required, |
| 3076 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); |
| 3077 |
| 3078 B(&install_map); |
| 3079 |
| 3080 Bind(&allocate_new_space); |
| 3081 Allocate(ConsString::kSize, |
| 3082 result, |
| 3083 scratch1, |
| 3084 scratch2, |
| 3085 gc_required, |
| 3086 flags); |
| 3087 |
| 3088 Bind(&install_map); |
| 3089 |
| 3090 InitializeNewString(result, |
| 3091 length, |
| 3092 Heap::kConsAsciiStringMapRootIndex, |
| 3093 scratch1, |
| 3094 scratch2); |
| 3095 } |
| 3096 |
| 3097 |
| 3098 void MacroAssembler::AllocateTwoByteSlicedString(Register result, |
| 3099 Register length, |
| 3100 Register scratch1, |
| 3101 Register scratch2, |
| 3102 Label* gc_required) { |
| 3103 ASSERT(!AreAliased(result, length, scratch1, scratch2)); |
| 3104 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, |
| 3105 TAG_OBJECT); |
| 3106 |
| 3107 InitializeNewString(result, |
| 3108 length, |
| 3109 Heap::kSlicedStringMapRootIndex, |
| 3110 scratch1, |
| 3111 scratch2); |
| 3112 } |
| 3113 |
| 3114 |
| 3115 void MacroAssembler::AllocateAsciiSlicedString(Register result, |
| 3116 Register length, |
| 3117 Register scratch1, |
| 3118 Register scratch2, |
| 3119 Label* gc_required) { |
| 3120 ASSERT(!AreAliased(result, length, scratch1, scratch2)); |
| 3121 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, |
| 3122 TAG_OBJECT); |
| 3123 |
| 3124 InitializeNewString(result, |
| 3125 length, |
| 3126 Heap::kSlicedAsciiStringMapRootIndex, |
| 3127 scratch1, |
| 3128 scratch2); |
| 3129 } |
| 3130 |
| 3131 |
| 3132 // Allocates a heap number or jumps to the gc_required label if the young
| 3133 // space is full and a scavenge is needed.
| 3134 void MacroAssembler::AllocateHeapNumber(Register result, |
| 3135 Label* gc_required, |
| 3136 Register scratch1, |
| 3137 Register scratch2, |
| 3138 Register heap_number_map) { |
| 3139 // Allocate an object in the heap for the heap number and tag it as a heap |
| 3140 // object. |
| 3141 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, |
| 3142 TAG_OBJECT); |
| 3143 |
| 3144 // Store heap number map in the allocated object. |
| 3145 if (heap_number_map.Is(NoReg)) { |
| 3146 heap_number_map = scratch1; |
| 3147 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 3148 } |
| 3149 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 3150 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
| 3151 } |
| 3152 |
| 3153 |
| 3154 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
| 3155 DoubleRegister value, |
| 3156 Label* gc_required, |
| 3157 Register scratch1, |
| 3158 Register scratch2, |
| 3159 Register heap_number_map) { |
| 3160 // TODO(all): Check if it would be more efficient to use STP to store both |
| 3161 // the map and the value. |
| 3162 AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map); |
| 3163 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 3164 } |
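| 
| // Usage sketch (comment only; scratch registers are assumptions). Boxing the
| // double in d0 as a new HeapNumber in x0; passing NoReg requests that the
| // heap number map be loaded internally (see AllocateHeapNumber above):
| //
| //   Label gc_required;
| //   AllocateHeapNumberWithValue(x0, d0, &gc_required, x10, x11, NoReg);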
| 3165 |
| 3166 |
| 3167 void MacroAssembler::JumpIfObjectType(Register object, |
| 3168 Register map, |
| 3169 Register type_reg, |
| 3170 InstanceType type, |
| 3171 Label* if_cond_pass, |
| 3172 Condition cond) { |
| 3173 CompareObjectType(object, map, type_reg, type); |
| 3174 B(cond, if_cond_pass); |
| 3175 } |
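| 
| // Usage sketch (comment only; registers are assumptions). Branching when x0
| // holds a JSFunction, leaving the map and instance type in x10 and x11. Note
| // that no smi check is performed, so x0 must be known to be a heap object:
| //
| //   Label is_function;
| //   JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, &is_function, eq);
| //
| // Since CompareObjectType only sets the flags, a condition such as ge turns
| // this into a range check on the instance type.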
| 3176 |
| 3177 |
| 3178 void MacroAssembler::JumpIfNotObjectType(Register object, |
| 3179 Register map, |
| 3180 Register type_reg, |
| 3181 InstanceType type, |
| 3182 Label* if_not_object) { |
| 3183 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne); |
| 3184 } |
| 3185 |
| 3186 |
| 3187 // Sets condition flags based on comparison, and returns type in type_reg. |
| 3188 void MacroAssembler::CompareObjectType(Register object, |
| 3189 Register map, |
| 3190 Register type_reg, |
| 3191 InstanceType type) { |
| 3192 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3193 CompareInstanceType(map, type_reg, type); |
| 3194 } |
| 3195 |
| 3196 |
| 3197 // Sets condition flags based on comparison, and returns type in type_reg. |
| 3198 void MacroAssembler::CompareInstanceType(Register map, |
| 3199 Register type_reg, |
| 3200 InstanceType type) { |
| 3201 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 3202 Cmp(type_reg, type); |
| 3203 } |
| 3204 |
| 3205 |
| 3206 void MacroAssembler::CompareMap(Register obj, |
| 3207 Register scratch, |
| 3208 Handle<Map> map, |
| 3209 Label* early_success) { |
| 3210 // TODO(jbramley): The early_success label isn't used. Remove it. |
| 3211 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 3212 CompareMap(scratch, map, early_success); |
| 3213 } |
| 3214 |
| 3215 |
| 3216 void MacroAssembler::CompareMap(Register obj_map, |
| 3217 Handle<Map> map, |
| 3218 Label* early_success) { |
| 3219 // TODO(jbramley): The early_success label isn't used. Remove it. |
| 3220 Cmp(obj_map, Operand(map)); |
| 3221 } |
| 3222 |
| 3223 |
| 3224 void MacroAssembler::CheckMap(Register obj, |
| 3225 Register scratch, |
| 3226 Handle<Map> map, |
| 3227 Label* fail, |
| 3228 SmiCheckType smi_check_type) { |
| 3229 if (smi_check_type == DO_SMI_CHECK) { |
| 3230 JumpIfSmi(obj, fail); |
| 3231 } |
| 3232 |
| 3233 Label success; |
| 3234 CompareMap(obj, scratch, map, &success); |
| 3235 B(ne, fail); |
| 3236 Bind(&success); |
| 3237 } |
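| 
| // Usage sketch (comment only; registers and labels are assumptions). Checking
| // that x0 is a heap number, including the smi check, and bailing out
| // otherwise:
| //
| //   Label not_heap_number;
| //   CheckMap(x0, x10, isolate()->factory()->heap_number_map(),
| //            &not_heap_number, DO_SMI_CHECK);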
| 3238 |
| 3239 |
| 3240 void MacroAssembler::CheckMap(Register obj, |
| 3241 Register scratch, |
| 3242 Heap::RootListIndex index, |
| 3243 Label* fail, |
| 3244 SmiCheckType smi_check_type) { |
| 3245 if (smi_check_type == DO_SMI_CHECK) { |
| 3246 JumpIfSmi(obj, fail); |
| 3247 } |
| 3248 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 3249 JumpIfNotRoot(scratch, index, fail); |
| 3250 } |
| 3251 |
| 3252 |
| 3253 void MacroAssembler::CheckMap(Register obj_map, |
| 3254 Handle<Map> map, |
| 3255 Label* fail, |
| 3256 SmiCheckType smi_check_type) { |
| 3257 if (smi_check_type == DO_SMI_CHECK) { |
| 3258 JumpIfSmi(obj_map, fail); |
| 3259 } |
| 3260 Label success; |
| 3261 CompareMap(obj_map, map, &success); |
| 3262 B(ne, fail); |
| 3263 Bind(&success); |
| 3264 } |
| 3265 |
| 3266 |
| 3267 void MacroAssembler::DispatchMap(Register obj, |
| 3268 Register scratch, |
| 3269 Handle<Map> map, |
| 3270 Handle<Code> success, |
| 3271 SmiCheckType smi_check_type) { |
| 3272 Label fail; |
| 3273 if (smi_check_type == DO_SMI_CHECK) { |
| 3274 JumpIfSmi(obj, &fail); |
| 3275 } |
| 3276 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 3277 Cmp(scratch, Operand(map)); |
| 3278 B(ne, &fail); |
| 3279 Jump(success, RelocInfo::CODE_TARGET); |
| 3280 Bind(&fail); |
| 3281 } |
| 3282 |
| 3283 |
| 3284 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { |
| 3285 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3286 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset)); |
| 3287 Tst(Tmp0(), mask); |
| 3288 } |
| 3289 |
| 3290 |
| 3291 void MacroAssembler::LoadElementsKind(Register result, Register object) { |
| 3292 // Load map. |
| 3293 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3294 // Load the map's "bit field 2". |
| 3295 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); |
| 3296 // Retrieve elements_kind from bit field 2. |
| 3297 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); |
| 3298 } |
| 3299 |
| 3300 |
| 3301 void MacroAssembler::TryGetFunctionPrototype(Register function, |
| 3302 Register result, |
| 3303 Register scratch, |
| 3304 Label* miss, |
| 3305 BoundFunctionAction action) { |
| 3306 ASSERT(!AreAliased(function, result, scratch)); |
| 3307 |
| 3308 // Check that the receiver isn't a smi. |
| 3309 JumpIfSmi(function, miss); |
| 3310 |
| 3311 // Check that the function really is a function. Load map into result reg. |
| 3312 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); |
| 3313 |
| 3314 if (action == kMissOnBoundFunction) { |
| 3315 Register scratch_w = scratch.W(); |
| 3316 Ldr(scratch, |
| 3317 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 3318 // On 64-bit platforms, the compiler hints field is not a smi. See the
| 3319 // definition of kCompilerHintsOffset in src/objects.h.
| 3320 Ldr(scratch_w, |
| 3321 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
| 3322 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss); |
| 3323 } |
| 3324 |
| 3325 // Make sure that the function has an instance prototype. |
| 3326 Label non_instance; |
| 3327 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 3328 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance); |
| 3329 |
| 3330 // Get the prototype or initial map from the function. |
| 3331 Ldr(result, |
| 3332 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3333 |
| 3334 // If the prototype or initial map is the hole, don't return it and simply |
| 3335 // miss the cache instead. This will allow us to allocate a prototype object |
| 3336 // on-demand in the runtime system. |
| 3337 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss); |
| 3338 |
| 3339 // If the function does not have an initial map, we're done. |
| 3340 Label done; |
| 3341 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done); |
| 3342 |
| 3343 // Get the prototype from the initial map. |
| 3344 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3345 B(&done); |
| 3346 |
| 3347 // Non-instance prototype: fetch prototype from constructor field in initial |
| 3348 // map. |
| 3349 Bind(&non_instance); |
| 3350 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 3351 |
| 3352 // All done. |
| 3353 Bind(&done); |
| 3354 } |
| 3355 |
| 3356 |
| 3357 void MacroAssembler::CompareRoot(const Register& obj, |
| 3358 Heap::RootListIndex index) { |
| 3359 ASSERT(!AreAliased(obj, Tmp0())); |
| 3360 LoadRoot(Tmp0(), index); |
| 3361 Cmp(obj, Tmp0()); |
| 3362 } |
| 3363 |
| 3364 |
| 3365 void MacroAssembler::JumpIfRoot(const Register& obj, |
| 3366 Heap::RootListIndex index, |
| 3367 Label* if_equal) { |
| 3368 CompareRoot(obj, index); |
| 3369 B(eq, if_equal); |
| 3370 } |
| 3371 |
| 3372 |
| 3373 void MacroAssembler::JumpIfNotRoot(const Register& obj, |
| 3374 Heap::RootListIndex index, |
| 3375 Label* if_not_equal) { |
| 3376 CompareRoot(obj, index); |
| 3377 B(ne, if_not_equal); |
| 3378 } |
| 3379 |
| 3380 |
| 3381 void MacroAssembler::CompareAndSplit(const Register& lhs, |
| 3382 const Operand& rhs, |
| 3383 Condition cond, |
| 3384 Label* if_true, |
| 3385 Label* if_false, |
| 3386 Label* fall_through) { |
| 3387 if ((if_true == if_false) && (if_false == fall_through)) { |
| 3388 // Fall through. |
| 3389 } else if (if_true == if_false) { |
| 3390 B(if_true); |
| 3391 } else if (if_false == fall_through) { |
| 3392 CompareAndBranch(lhs, rhs, cond, if_true); |
| 3393 } else if (if_true == fall_through) { |
| 3394 CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false); |
| 3395 } else { |
| 3396 CompareAndBranch(lhs, rhs, cond, if_true); |
| 3397 B(if_false); |
| 3398 } |
| 3399 } |
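| 
| // Usage sketch (comment only; the register and labels are assumptions). When
| // one of the targets is also the fall-through label, only a single
| // compare-and-branch is emitted:
| //
| //   Label is_zero, not_zero;
| //   CompareAndSplit(x0, 0, eq, &is_zero, &not_zero, &not_zero);
| //   Bind(&not_zero);   // Reached by falling through when x0 != 0.
| //   ...
| //   Bind(&is_zero);    // Reached by the branch when x0 == 0.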
| 3400 |
| 3401 |
| 3402 void MacroAssembler::TestAndSplit(const Register& reg, |
| 3403 uint64_t bit_pattern, |
| 3404 Label* if_all_clear, |
| 3405 Label* if_any_set, |
| 3406 Label* fall_through) { |
| 3407 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) { |
| 3408 // Fall through. |
| 3409 } else if (if_all_clear == if_any_set) { |
| 3410 B(if_all_clear); |
| 3411 } else if (if_all_clear == fall_through) { |
| 3412 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); |
| 3413 } else if (if_any_set == fall_through) { |
| 3414 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear); |
| 3415 } else { |
| 3416 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); |
| 3417 B(if_all_clear); |
| 3418 } |
| 3419 } |
| 3420 |
| 3421 |
| 3422 void MacroAssembler::CheckFastElements(Register map, |
| 3423 Register scratch, |
| 3424 Label* fail) { |
| 3425 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 3426 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 3427 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 3428 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 3429 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 3430 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue); |
| 3431 B(hi, fail); |
| 3432 } |
| 3433 |
| 3434 |
| 3435 void MacroAssembler::CheckFastObjectElements(Register map, |
| 3436 Register scratch, |
| 3437 Label* fail) { |
| 3438 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 3439 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 3440 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 3441 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 3442 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 3443 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
| 3444 // If cond==ls, set cond=hi, otherwise compare. |
| 3445 Ccmp(scratch, |
| 3446 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); |
| 3447 B(hi, fail); |
| 3448 } |
| 3449 |
| 3450 |
| 3451 void MacroAssembler::CheckFastSmiElements(Register map, |
| 3452 Register scratch, |
| 3453 Label* fail) { |
| 3454 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 3455 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 3456 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
| 3457 Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue); |
| 3458 B(hi, fail); |
| 3459 } |
| 3460 |
| 3461 |
| 3462 // Note: The ARM version of this clobbers elements_reg, but this version does |
| 3463 // not. Some uses of this in A64 assume that elements_reg will be preserved. |
| 3464 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
| 3465 Register key_reg, |
| 3466 Register elements_reg, |
| 3467 Register scratch1, |
| 3468 FPRegister fpscratch1, |
| 3469 FPRegister fpscratch2, |
| 3470 Label* fail) { |
| 3471 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); |
| 3472 Label store_num; |
| 3473 |
| 3474 // Speculatively convert the smi to a double - all smis can be exactly |
| 3475 // represented as a double. |
| 3476 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); |
| 3477 |
| 3478 // If value_reg is a smi, we're done. |
| 3479 JumpIfSmi(value_reg, &store_num); |
| 3480 |
| 3481 // Ensure that the object is a heap number. |
| 3482 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), |
| 3483 fail, DONT_DO_SMI_CHECK); |
| 3484 |
| 3485 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
| 3486 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); |
| 3487 |
| 3488 // Check for NaN by comparing the number to itself: NaN comparison will |
| 3489 // report unordered, indicated by the overflow flag being set. |
| 3490 Fcmp(fpscratch1, fpscratch1); |
| 3491 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); |
| 3492 |
| 3493 // Store the result. |
| 3494 Bind(&store_num); |
| 3495 Add(scratch1, elements_reg, |
| 3496 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); |
| 3497 Str(fpscratch1, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); |
| 3498 } |
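| 
| // Usage sketch (comment only; register choices are assumptions). Storing the
| // number in x0 into a FixedDoubleArray at a smi-tagged index, canonicalizing
| // NaNs and bailing out if x0 is neither a smi nor a heap number:
| //
| //   Label bailout;
| //   // x0: value, x1: smi index, x2: FixedDoubleArray (tagged).
| //   StoreNumberToDoubleElements(x0, x1, x2, x10, d10, d11, &bailout);
| //   // x2 is preserved, unlike in the ARM version (see the note above).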
| 3499 |
| 3500 |
| 3501 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
| 3502 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; |
| 3503 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate()); |
| 3504 } |
| 3505 |
| 3506 |
| 3507 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
| 3508 // If the hash field contains an array index, pick it out. The assert checks
| 3509 // that the constants for the maximum number of digits for an array index
| 3510 // cached in the hash field and the number of bits reserved for it do not
| 3511 // conflict.
| 3512 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
| 3513 (1 << String::kArrayIndexValueBits)); |
| 3514 // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros
| 3515 // in the low kHashShift bits.
| 3516 STATIC_ASSERT(kSmiTag == 0); |
| 3517 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); |
| 3518 SmiTag(index, hash); |
| 3519 } |
| 3520 |
| 3521 |
| 3522 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 3523 Register scratch, |
| 3524 Label* miss) { |
| 3525 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function. |
| 3526 // The ARM version takes two scratch registers, and that should be enough for |
| 3527 // all of the checks. |
| 3528 |
| 3529 Label same_contexts; |
| 3530 |
| 3531 ASSERT(!AreAliased(holder_reg, scratch)); |
| 3532 |
| 3533 // Load current lexical context from the stack frame. |
| 3534 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3535 // In debug mode, make sure the lexical context is set. |
| 3536 #ifdef DEBUG |
| 3537 Cmp(scratch, 0); |
| 3538 Check(ne, "we should not have an empty lexical context"); |
| 3539 #endif |
| 3540 |
| 3541 // Load the native context of the current context. |
| 3542 int offset = |
| 3543 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
| 3544 Ldr(scratch, FieldMemOperand(scratch, offset)); |
| 3545 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
| 3546 |
| 3547 // Check the context is a native context. |
| 3548 if (emit_debug_code()) { |
| 3549 // Read the first word and compare to the native_context_map.
| 3550 Register temp = Tmp1(); |
| 3551 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 3552 CompareRoot(temp, Heap::kNativeContextMapRootIndex); |
| 3553 Check(eq, "JSGlobalObject::native_context should be a native context."); |
| 3554 } |
| 3555 |
| 3556 // Check if both contexts are the same. |
| 3557 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
| 3558 cmp(scratch, Tmp0()); |
| 3559 b(&same_contexts, eq); |
| 3560 |
| 3561 // Check the context is a native context. |
| 3562 if (emit_debug_code()) { |
| 3563 // Move Tmp0() into a different register, as CompareRoot will use it. |
| 3564 Register temp = Tmp1(); |
| 3565 mov(temp, Tmp0()); |
| 3566 CompareRoot(temp, Heap::kNullValueRootIndex); |
| 3567 Check(ne, "JSGlobalProxy::context() should not be null."); |
| 3568 |
| 3569 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset)); |
| 3570 CompareRoot(temp, Heap::kNativeContextMapRootIndex); |
| 3571 Check(eq, "JSGlobalObject::native_context should be a native context."); |
| 3572 |
| 3573 // Assume that Tmp0() has been clobbered by the MacroAssembler, and
| 3574 // reload its value.
| 3575 ldr(Tmp0(), FieldMemOperand(holder_reg, |
| 3576 JSGlobalProxy::kNativeContextOffset)); |
| 3577 } |
| 3578 |
| 3579 // Check that the security token in the calling global object is |
| 3580 // compatible with the security token in the receiving global |
| 3581 // object. |
| 3582 int token_offset = Context::kHeaderSize + |
| 3583 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 3584 |
| 3585 ldr(scratch, FieldMemOperand(scratch, token_offset)); |
| 3586 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset)); |
| 3587 cmp(scratch, Tmp0()); |
| 3588 b(miss, ne); |
| 3589 |
| 3590 bind(&same_contexts); |
| 3591 } |
| 3592 |
| 3593 |
| 3594 void MacroAssembler::GetNumberHash(Register key, Register scratch) { |
| 3595 ASSERT(!AreAliased(key, scratch)); |
| 3596 |
| 3597 // Xor original key with a seed. |
| 3598 LoadRoot(scratch, Heap::kHashSeedRootIndex); |
| 3599 Eor(key, key, Operand::UntagSmi(scratch)); |
| 3600 |
| 3601 // The algorithm uses 32-bit integer values. |
| 3602 key = key.W(); |
| 3603 scratch = scratch.W(); |
| 3604 |
| 3605 // Compute the hash code from the untagged key. This must be kept in sync |
| 3606 // with ComputeIntegerHash in utils.h. |
| 3607 // |
| 3608 // hash = ~hash + (hash << 15);
| 3609 Mvn(scratch, key); |
| 3610 Add(key, scratch, Operand(key, LSL, 15)); |
| 3611 // hash = hash ^ (hash >> 12); |
| 3612 Eor(key, key, Operand(key, LSR, 12)); |
| 3613 // hash = hash + (hash << 2); |
| 3614 Add(key, key, Operand(key, LSL, 2)); |
| 3615 // hash = hash ^ (hash >> 4); |
| 3616 Eor(key, key, Operand(key, LSR, 4)); |
| 3617 // hash = hash * 2057; |
| 3618 Mov(scratch, Operand(key, LSL, 11)); |
| 3619 Add(key, key, Operand(key, LSL, 3)); |
| 3620 Add(key, key, scratch); |
| 3621 // hash = hash ^ (hash >> 16); |
| 3622 Eor(key, key, Operand(key, LSR, 16)); |
| 3623 } |
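| 
| // For reference, the scalar computation implemented above is equivalent to
| // the following C sketch of ComputeIntegerHash from utils.h (variable names
| // are illustrative; 'seed' is the untagged heap hash seed):
| //
| //   uint32_t hash = key ^ seed;
| //   hash = ~hash + (hash << 15);
| //   hash = hash ^ (hash >> 12);
| //   hash = hash + (hash << 2);
| //   hash = hash ^ (hash >> 4);
| //   hash = hash * 2057;  // Emitted as hash + (hash << 3) + (hash << 11).
| //   hash = hash ^ (hash >> 16);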
| 3624 |
| 3625 |
| 3626 void MacroAssembler::LoadFromNumberDictionary(Label* miss, |
| 3627 Register elements, |
| 3628 Register key, |
| 3629 Register result, |
| 3630 Register scratch0, |
| 3631 Register scratch1, |
| 3632 Register scratch2, |
| 3633 Register scratch3) { |
| 3634 ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); |
| 3635 |
| 3636 Label done; |
| 3637 |
| 3638 SmiUntag(scratch0, key); |
| 3639 GetNumberHash(scratch0, scratch1); |
| 3640 |
| 3641 // Compute the capacity mask. |
| 3642 Ldrsw(scratch1, |
| 3643 UntagSmiFieldMemOperand(elements, |
| 3644 SeededNumberDictionary::kCapacityOffset)); |
| 3645 Sub(scratch1, scratch1, 1); |
| 3646 |
| 3647 // Generate an unrolled loop that performs a few probes before giving up. |
| 3648 static const int kProbes = 4; |
| 3649 for (int i = 0; i < kProbes; i++) { |
| 3650 // Compute the masked index: (hash + i + i * i) & mask. |
| 3651 if (i > 0) { |
| 3652 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i)); |
| 3653 } else { |
| 3654 Mov(scratch2, scratch0); |
| 3655 } |
| 3656 And(scratch2, scratch2, scratch1); |
| 3657 |
| 3658 // Scale the index by multiplying by the element size. |
| 3659 ASSERT(SeededNumberDictionary::kEntrySize == 3); |
| 3660 Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); |
| 3661 |
| 3662 // Check if the key is identical to the name. |
| 3663 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); |
| 3664 Ldr(scratch3, |
| 3665 FieldMemOperand(scratch2, |
| 3666 SeededNumberDictionary::kElementsStartOffset)); |
| 3667 Cmp(key, scratch3); |
| 3668 if (i != kProbes - 1) { |
| 3669 B(eq, &done); |
| 3670 } else { |
| 3671 B(ne, miss); |
| 3672 } |
| 3673 } |
| 3674 |
| 3675 Bind(&done); |
| 3676 // Check that the value is a normal property. |
| 3677 const int kDetailsOffset = |
| 3678 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
| 3679 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset)); |
| 3680 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss); |
| 3681 |
| 3682 // Get the value at the masked, scaled index and return. |
| 3683 const int kValueOffset = |
| 3684 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
| 3685 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); |
| 3686 } |
| 3687 |
| 3688 |
| 3689 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
| 3690 Register address, |
| 3691 Register scratch, |
| 3692 SaveFPRegsMode fp_mode, |
| 3693 RememberedSetFinalAction and_then) { |
| 3694 ASSERT(!AreAliased(object, address, scratch)); |
| 3695 Label done, store_buffer_overflow; |
| 3696 if (emit_debug_code()) { |
| 3697 Label ok; |
| 3698 JumpIfNotInNewSpace(object, &ok); |
| 3699 Abort("Remembered set pointer is in new space"); |
| 3700 bind(&ok); |
| 3701 } |
| 3702 // Load store buffer top. |
| 3703 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate()))); |
| 3704 Ldr(scratch, MemOperand(Tmp0())); |
| 3705 // Store pointer to buffer and increment buffer top. |
| 3706 Str(address, MemOperand(scratch, kPointerSize, PostIndex)); |
| 3707 // Write back new top of buffer. |
| 3708 Str(scratch, MemOperand(Tmp0())); |
| 3709 // Check for the end of the buffer, and call the overflow stub if it has
| 3710 // been reached.
| 3711 ASSERT(StoreBuffer::kStoreBufferOverflowBit == |
| 3712 (1 << (14 + kPointerSizeLog2))); |
| 3713 if (and_then == kFallThroughAtEnd) { |
| 3714 Tbz(scratch, (14 + kPointerSizeLog2), &done); |
| 3715 } else { |
| 3716 ASSERT(and_then == kReturnAtEnd); |
| 3717 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow); |
| 3718 Ret(); |
| 3719 } |
| 3720 |
| 3721 Bind(&store_buffer_overflow); |
| 3722 Push(lr); |
| 3723 StoreBufferOverflowStub store_buffer_overflow_stub = |
| 3724 StoreBufferOverflowStub(fp_mode); |
| 3725 CallStub(&store_buffer_overflow_stub); |
| 3726 Pop(lr); |
| 3727 |
| 3728 Bind(&done); |
| 3729 if (and_then == kReturnAtEnd) { |
| 3730 Ret(); |
| 3731 } |
| 3732 } |
| 3733 |
| 3734 |
| 3735 void MacroAssembler::PopSafepointRegisters() { |
| 3736 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| 3737 PopXRegList(kSafepointSavedRegisters); |
| 3738 Drop(num_unsaved); |
| 3739 } |
| 3740 |
| 3741 |
| 3742 void MacroAssembler::PushSafepointRegisters() { |
| 3743 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so |
| 3744 // adjust the stack for unsaved registers. |
| 3745 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| 3746 ASSERT(num_unsaved >= 0); |
| 3747 Claim(num_unsaved); |
| 3748 PushXRegList(kSafepointSavedRegisters); |
| 3749 } |
| 3750 |
| 3751 |
| 3752 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
| 3753 // Make sure the safepoint registers list is what we expect. |
| 3754 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); |
| 3755 |
| 3756 // Safepoint registers are stored contiguously on the stack, but not all the |
| 3757 // registers are saved. The following registers are excluded: |
| 3758 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of |
| 3759 // the macro assembler. |
| 3760 // - x28 (jssp) because JS stack pointer doesn't need to be included in |
| 3761 // safepoint registers. |
| 3762 // - x31 (csp) because the system stack pointer doesn't need to be included |
| 3763 // in safepoint registers. |
| 3764 // |
| 3765 // This function implements the mapping of register code to index into the |
| 3766 // safepoint register slots. |
| 3767 if ((reg_code >= 0) && (reg_code <= 15)) { |
| 3768 return reg_code; |
| 3769 } else if ((reg_code >= 18) && (reg_code <= 27)) { |
| 3770 // Skip ip0 and ip1. |
| 3771 return reg_code - 2; |
| 3772 } else if ((reg_code == 29) || (reg_code == 30)) { |
| 3773 // Also skip jssp. |
| 3774 return reg_code - 3; |
| 3775 } else { |
| 3776 // This register has no safepoint register slot. |
| 3777 UNREACHABLE(); |
| 3778 return -1; |
| 3779 } |
| 3780 } |
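| 
| // For example (assuming the register numbering described above): x0-x15 map
| // to slots 0-15, x18 maps to slot 16, x27 maps to slot 25, and fp (x29) and
| // lr (x30) map to slots 26 and 27 respectively.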
| 3781 |
| 3782 |
| 3783 void MacroAssembler::CheckPageFlagSet(const Register& object, |
| 3784 const Register& scratch, |
| 3785 int mask, |
| 3786 Label* if_any_set) { |
| 3787 And(scratch, object, ~Page::kPageAlignmentMask); |
| 3788 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 3789 TestAndBranchIfAnySet(scratch, mask, if_any_set); |
| 3790 } |
| 3791 |
| 3792 |
| 3793 void MacroAssembler::CheckPageFlagClear(const Register& object, |
| 3794 const Register& scratch, |
| 3795 int mask, |
| 3796 Label* if_all_clear) { |
| 3797 And(scratch, object, ~Page::kPageAlignmentMask); |
| 3798 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 3799 TestAndBranchIfAllClear(scratch, mask, if_all_clear); |
| 3800 } |
| 3801 |
| 3802 |
| 3803 void MacroAssembler::RecordWriteField( |
| 3804 Register object, |
| 3805 int offset, |
| 3806 Register value, |
| 3807 Register scratch, |
| 3808 LinkRegisterStatus lr_status, |
| 3809 SaveFPRegsMode save_fp, |
| 3810 RememberedSetAction remembered_set_action, |
| 3811 SmiCheck smi_check, |
| 3812 PregenExpectation pregen_expectation) { |
| 3813 // First, check if a write barrier is even needed. The tests below |
| 3814 // catch stores of Smis. |
| 3815 Label done; |
| 3816 |
| 3817 // Skip the barrier if writing a smi. |
| 3818 if (smi_check == INLINE_SMI_CHECK) { |
| 3819 JumpIfSmi(value, &done); |
| 3820 } |
| 3821 |
| 3822 // Although the object register is tagged, the offset is relative to the start |
| 3823 // of the object, so offset must be a multiple of kPointerSize. |
| 3824 ASSERT(IsAligned(offset, kPointerSize)); |
| 3825 |
| 3826 Add(scratch, object, offset - kHeapObjectTag); |
| 3827 if (emit_debug_code()) { |
| 3828 Label ok; |
| 3829 Tst(scratch, (1 << kPointerSizeLog2) - 1); |
| 3830 B(eq, &ok); |
| 3831 Abort("Unaligned cell in write barrier"); |
| 3832 Bind(&ok); |
| 3833 } |
| 3834 |
| 3835 RecordWrite(object, |
| 3836 scratch, |
| 3837 value, |
| 3838 lr_status, |
| 3839 save_fp, |
| 3840 remembered_set_action, |
| 3841 OMIT_SMI_CHECK, |
| 3842 pregen_expectation); |
| 3843 |
| 3844 Bind(&done); |
| 3845 |
| 3846 // Clobber clobbered input registers when running with the debug-code flag |
| 3847 // turned on to provoke errors. |
| 3848 if (emit_debug_code()) { |
| 3849 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); |
| 3850 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); |
| 3851 } |
| 3852 } |
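| 
| // Usage sketch (comment only). A typical field store followed by its write
| // barrier; the offset and registers are placeholders, and the trailing
| // arguments (remembered set action, smi check, pregen expectation) are
| // assumed to take their defaults from the header:
| //
| //   Str(x1, FieldMemOperand(x0, JSObject::kHeaderSize));
| //   RecordWriteField(x0, JSObject::kHeaderSize, x1, x10,
| //                    kLRHasNotBeenSaved, kDontSaveFPRegs);
| //
| // Note that, with debug code enabled, value and scratch are zapped afterwards
| // (see above), so they cannot be reused by the caller.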
| 3853 |
| 3854 |
| 3855 // Will clobber: object, address, value, Tmp0(), Tmp1(). |
| 3856 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. |
| 3857 // |
| 3858 // The register 'object' contains a heap object pointer. The heap object tag is |
| 3859 // shifted away. |
| 3860 // |
| 3861 // If pregen_expectation is EXPECT_PREGENERATED, this will assert that the |
| 3862 // stub used was pregenerated. This is done to ensure that |
| 3863 // RecordWriteStub::kAheadOfTime stays in sync with real usage. Otherwise, no
| 3864 // assertion is made, since another call site might pregenerate the stub with
| 3865 // the same parameters.
| 3866 void MacroAssembler::RecordWrite(Register object, |
| 3867 Register address, |
| 3868 Register value, |
| 3869 LinkRegisterStatus lr_status, |
| 3870 SaveFPRegsMode fp_mode, |
| 3871 RememberedSetAction remembered_set_action, |
| 3872 SmiCheck smi_check, |
| 3873 PregenExpectation pregen_expectation) { |
| 3874 // The compiled code assumes that record write doesn't change the |
| 3875 // context register, so we check that none of the clobbered |
| 3876 // registers are cp. |
| 3877 ASSERT(!address.is(cp) && !value.is(cp)); |
| 3878 |
| 3879 if (emit_debug_code()) { |
| 3880 Ldr(Tmp0(), MemOperand(address)); |
| 3881 Cmp(Tmp0(), value); |
| 3882 Check(eq, "Wrong address or value passed to RecordWrite."); |
| 3883 } |
| 3884 |
| 3885 Label done; |
| 3886 |
| 3887 if (smi_check == INLINE_SMI_CHECK) { |
| 3888 ASSERT_EQ(0, kSmiTag); |
| 3889 JumpIfSmi(value, &done); |
| 3890 } |
| 3891 |
| 3892 CheckPageFlagClear(value, |
| 3893 value, // Used as scratch. |
| 3894 MemoryChunk::kPointersToHereAreInterestingMask, |
| 3895 &done); |
| 3896 CheckPageFlagClear(object, |
| 3897 value, // Used as scratch. |
| 3898 MemoryChunk::kPointersFromHereAreInterestingMask, |
| 3899 &done); |
| 3900 |
| 3901 // Record the actual write. |
| 3902 if (lr_status == kLRHasNotBeenSaved) { |
| 3903 Push(lr); |
| 3904 } |
| 3905 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); |
| 3906 if (pregen_expectation == EXPECT_PREGENERATED) { |
| 3907 // If we expected a pregenerated stub, ensure that we get one. |
| 3908 // A failure at this assertion probably indicates that the |
| 3909 // RecordWriteStub::kAheadOfTime list needs to be updated. |
| 3910 ASSERT(stub.IsPregenerated()); |
| 3911 } |
| 3912 CallStub(&stub); |
| 3913 if (lr_status == kLRHasNotBeenSaved) { |
| 3914 Pop(lr); |
| 3915 } |
| 3916 |
| 3917 Bind(&done); |
| 3918 |
| 3919 // Clobber clobbered registers when running with the debug-code flag |
| 3920 // turned on to provoke errors. |
| 3921 if (emit_debug_code()) { |
| 3922 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12))); |
| 3923 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16))); |
| 3924 } |
| 3925 } |
| 3926 |
| 3927 |
| 3928 void MacroAssembler::AssertHasValidColor(const Register& reg) { |
| 3929 if (emit_debug_code()) { |
| 3930 // The bit sequence is backward. The first character in the string |
| 3931 // represents the least significant bit. |
| 3932 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 3933 |
| 3934 Label color_is_valid; |
| 3935 Tbnz(reg, 0, &color_is_valid); |
| 3936 Tbz(reg, 1, &color_is_valid); |
| 3937 Abort("Impossible color bit pattern found."); |
| 3938 Bind(&color_is_valid); |
| 3939 } |
| 3940 } |
| 3941 |
| 3942 |
| 3943 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 3944 Register bitmap_reg, |
| 3945 Register shift_reg) { |
| 3946 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg)); |
| 3947 // addr_reg is divided into fields: |
| 3948 // |63 page base 20|19 high 8|7 shift 3|2 0| |
| 3949 // 'high' gives the index of the cell holding color bits for the object. |
| 3950 // 'shift' gives the offset in the cell for this object's color. |
| 3951 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
| 3952 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits); |
| 3953 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); |
| 3954 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2)); |
| 3955 // bitmap_reg: |
| 3956 // |63 page base 20|19 zeros 15|14 high 3|2 0| |
| 3957 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
| 3958 } |
| 3959 |
| 3960 |
| 3961 void MacroAssembler::HasColor(Register object, |
| 3962 Register bitmap_scratch, |
| 3963 Register shift_scratch, |
| 3964 Label* has_color, |
| 3965 int first_bit, |
| 3966 int second_bit) { |
| 3967 // See mark-compact.h for color definitions. |
| 3968 ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch)); |
| 3969 |
| 3970 GetMarkBits(object, bitmap_scratch, shift_scratch); |
| 3971 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 3972 // Shift the bitmap down to get the color of the object in bits [1:0]. |
| 3973 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch); |
| 3974 |
| 3975 AssertHasValidColor(bitmap_scratch); |
| 3976 |
| 3977 // These bit sequences are backwards. The first character in the string |
| 3978 // represents the least significant bit. |
| 3979 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 3980 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 3981 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 3982 |
| 3983 // Check for the color. |
| 3984 if (first_bit == 0) { |
| 3985 // Checking for white. |
| 3986 ASSERT(second_bit == 0); |
| 3987 // We only need to test the first bit. |
| 3988 Tbz(bitmap_scratch, 0, has_color); |
| 3989 } else { |
| 3990 Label other_color; |
| 3991 // Checking for grey or black. |
| 3992 Tbz(bitmap_scratch, 0, &other_color); |
| 3993 if (second_bit == 0) { |
| 3994 Tbz(bitmap_scratch, 1, has_color); |
| 3995 } else { |
| 3996 Tbnz(bitmap_scratch, 1, has_color); |
| 3997 } |
| 3998 Bind(&other_color); |
| 3999 } |
| 4000 |
| 4001 // Fall through if it does not have the right color. |
| 4002 } |
| 4003 |
| 4004 |
| 4005 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, |
| 4006 Register scratch, |
| 4007 Label* if_deprecated) { |
| 4008 if (map->CanBeDeprecated()) { |
| 4009 Mov(scratch, Operand(map)); |
| 4010 Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset)); |
| 4011 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated); |
| 4012 } |
| 4013 } |
| 4014 |
| 4015 |
| 4016 void MacroAssembler::JumpIfBlack(Register object, |
| 4017 Register scratch0, |
| 4018 Register scratch1, |
| 4019 Label* on_black) { |
| 4020 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 4021 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
| 4022 } |
| 4023 |
| 4024 |
| 4025 void MacroAssembler::EnsureNotWhite( |
| 4026 Register value, |
| 4027 Register bitmap_scratch, |
| 4028 Register shift_scratch, |
| 4029 Register load_scratch, |
| 4030 Register length_scratch, |
| 4031 Label* value_is_white_and_not_data) { |
| 4032 ASSERT(!AreAliased( |
| 4033 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch)); |
| 4034 |
| 4035 // These bit sequences are backwards. The first character in the string |
| 4036 // represents the least significant bit. |
| 4037 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 4038 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 4039 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 4040 |
| 4041 GetMarkBits(value, bitmap_scratch, shift_scratch); |
| 4042 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4043 Lsr(load_scratch, load_scratch, shift_scratch); |
| 4044 |
| 4045 AssertHasValidColor(load_scratch); |
| 4046 |
| 4047 // If the value is black or grey we don't need to do anything. |
| 4048 // Since both black and grey have a 1 in the first position and white does |
| 4049 // not have a 1 there we only need to check one bit. |
| 4050 Label done; |
| 4051 Tbnz(load_scratch, 0, &done); |
| 4052 |
| 4053 // Value is white. We check whether it is data that doesn't need scanning. |
| 4054 Register map = load_scratch; // Holds map while checking type. |
| 4055 Label is_data_object; |
| 4056 |
| 4057 // Check for heap-number. |
| 4058 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 4059 Mov(length_scratch, HeapNumber::kSize); |
| 4060 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object); |
| 4061 |
| 4062 // Check for strings. |
| 4063 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 4064 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 4065 // If it's a string and it's not a cons string then it's an object containing |
| 4066 // no GC pointers. |
| 4067 Register instance_type = load_scratch; |
| 4068 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 4069 TestAndBranchIfAnySet(instance_type, |
| 4070 kIsIndirectStringMask | kIsNotStringMask, |
| 4071 value_is_white_and_not_data); |
| 4072 |
| 4073 // It's a non-indirect (non-cons and non-slice) string. |
| 4074 // If it's external, the length is just ExternalString::kSize. |
| 4075 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
| 4076 // External strings are the only ones with the kExternalStringTag bit |
| 4077 // set. |
| 4078 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); |
| 4079 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); |
| 4080 Mov(length_scratch, ExternalString::kSize); |
| 4081 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object); |
| 4082 |
| 4083 // Sequential string, either ASCII or UC16. |
| 4084 // For ASCII (char-size of 1) we shift the smi tag away to get the length. |
| 4085 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby |
| 4086 // getting the length multiplied by 2. |
| 4087 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); |
| 4088 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value, |
| 4089 String::kLengthOffset)); |
| 4090 Tst(instance_type, kStringEncodingMask); |
| 4091 Cset(load_scratch, eq); |
| 4092 Lsl(length_scratch, length_scratch, load_scratch); |
| 4093 Add(length_scratch, |
| 4094 length_scratch, |
| 4095 SeqString::kHeaderSize + kObjectAlignmentMask); |
| 4096 Bic(length_scratch, length_scratch, kObjectAlignmentMask); |
| 4097 |
| 4098 Bind(&is_data_object); |
| 4099 // Value is a data object, and it is white. Mark it black. Since we know |
| 4100 // that the object is white we can make it black by flipping one bit. |
| 4101 Register mask = shift_scratch; |
| 4102 Mov(load_scratch, 1); |
| 4103 Lsl(mask, load_scratch, shift_scratch); |
| 4104 |
| 4105 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4106 Orr(load_scratch, load_scratch, mask); |
| 4107 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4108 |
| 4109 Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask); |
| 4110 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 4111 Add(load_scratch, load_scratch, length_scratch); |
| 4112 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 4113 |
| 4114 Bind(&done); |
| 4115 } |
| 4116 |
| 4117 |
| 4118 void MacroAssembler::Assert(Condition cond, const char* msg) { |
| 4119 if (emit_debug_code()) { |
| 4120 Check(cond, msg); |
| 4121 } |
| 4122 } |
| 4123 |
| 4124 |
| 4125 |
| 4126 void MacroAssembler::AssertRegisterIsClear(Register reg, const char* msg) { |
| 4127 if (emit_debug_code()) { |
| 4128 CheckRegisterIsClear(reg, msg); |
| 4129 } |
| 4130 } |
| 4131 |
| 4132 |
| 4133 void MacroAssembler::AssertRegisterIsRoot(Register reg, |
| 4134 Heap::RootListIndex index) { |
| 4135 // CompareRoot uses Tmp0(). |
| 4136 ASSERT(!reg.Is(Tmp0())); |
| 4137 if (emit_debug_code()) { |
| 4138 CompareRoot(reg, index); |
| 4139 Check(eq, "Register did not match expected root"); |
| 4140 } |
| 4141 } |
| 4142 |
| 4143 |
| 4144 void MacroAssembler::AssertFastElements(Register elements) { |
| 4145 if (emit_debug_code()) { |
| 4146 Register temp = Tmp1(); |
| 4147 Label ok; |
| 4148 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 4149 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); |
| 4150 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); |
| 4151 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); |
| 4152 Abort("JSObject with fast elements map has slow elements"); |
| 4153 Bind(&ok); |
| 4154 } |
| 4155 } |
| 4156 |
| 4157 |
| 4158 void MacroAssembler::AssertIsString(const Register& object) { |
| 4159 if (emit_debug_code()) { |
| 4160 Register temp = Tmp1(); |
| 4161 STATIC_ASSERT(kSmiTag == 0); |
| 4162 Tst(object, Operand(kSmiTagMask)); |
| 4163 Check(ne, "Operand is not a string"); |
| 4164 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 4165 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 4166 Check(lo, "Operand is not a string"); |
| 4167 } |
| 4168 } |
| 4169 |
| 4170 |
| 4171 void MacroAssembler::Check(Condition cond, const char* msg) { |
| 4172 Label ok; |
| 4173 B(cond, &ok); |
| 4174 Abort(msg); |
| 4175 // Will not return here. |
| 4176 Bind(&ok); |
| 4177 } |
| 4178 |
| 4179 |
| 4180 void MacroAssembler::CheckRegisterIsClear(Register reg, const char* msg) { |
| 4181 Label ok; |
| 4182 Cbz(reg, &ok); |
| 4183 Abort(msg); |
| 4184 // Will not return here. |
| 4185 Bind(&ok); |
| 4186 } |
| 4187 |
| 4188 |
| 4189 void MacroAssembler::Abort(const char* msg) { |
| 4190 #ifdef DEBUG |
| 4191 if (msg != NULL) { |
| 4192 RecordComment("Abort message: "); |
| 4193 RecordComment(msg); |
| 4194 } |
| 4195 #endif |
| 4196 |
| 4197 Label msg_address; |
| 4198 Adr(x0, &msg_address); |
| 4199 |
| 4200 if (use_real_aborts()) { |
| 4201 // Split the message pointer into two smis so that the GC does not try
| 4202 // to scan the string.
| 4203 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
| 4204 SmiTag(x1, x0); |
| 4205 Bic(x0, x0, kSmiShiftMask); |
| 4206 |
| 4207 Push(x0, x1); |
| 4208 |
| 4209 if (!has_frame_) { |
| 4210 // We don't actually want to generate a pile of code for this, so just |
| 4211 // claim there is a stack frame, without generating one. |
| 4212 FrameScope scope(this, StackFrame::NONE); |
| 4213 CallRuntime(Runtime::kAbort, 2); |
| 4214 } else { |
| 4215 CallRuntime(Runtime::kAbort, 2); |
| 4216 } |
| 4217 } else { |
| 4218 // Call Printf directly, to report the error. The message is in x0, which is |
| 4219 // the first argument to Printf. |
| 4220 if (!csp.Is(StackPointer())) { |
| 4221 Bic(csp, StackPointer(), 0xf); |
| 4222 } |
| 4223 CallPrintf(); |
| 4224 |
| 4225 // The CallPrintf will return, so this point is actually reachable in this |
| 4226 // context. However: |
| 4227 // - We're already executing an abort (which shouldn't be reachable in |
| 4228 // valid code). |
| 4229 // - We need a way to stop execution on both the simulator and real |
| 4230 // hardware, and Unreachable() is the best option. |
| 4231 Unreachable(); |
| 4232 } |
| 4233 |
| 4234 // Emit the message string directly in the instruction stream. |
| 4235 { |
| 4236 BlockConstPoolScope scope(this); |
| 4237 Bind(&msg_address); |
| 4238 EmitStringData(msg); |
| 4239 } |
| 4240 } |
| 4241 |
| 4242 |
| 4243 void MacroAssembler::LoadTransitionedArrayMapConditional( |
| 4244 ElementsKind expected_kind, |
| 4245 ElementsKind transitioned_kind, |
| 4246 Register map_in_out, |
| 4247 Register scratch, |
| 4248 Label* no_map_match) { |
| 4249 // Load the global or builtins object from the current context. |
| 4250 Ldr(scratch, GlobalObjectMemOperand()); |
| 4251 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
| 4252 |
| 4253 // Check that the function's map is the same as the expected cached map. |
| 4254 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX)); |
| 4255 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4256 Ldr(Tmp0(), FieldMemOperand(scratch, offset)); |
| 4257 Cmp(map_in_out, Tmp0()); |
| 4258 B(ne, no_map_match); |
| 4259 |
| 4260 // Use the transitioned cached map. |
| 4261 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4262 Ldr(map_in_out, FieldMemOperand(scratch, offset)); |
| 4263 } |
| 4264 |
| 4265 |
| 4266 void MacroAssembler::LoadInitialArrayMap(Register function_in, |
| 4267 Register scratch, |
| 4268 Register map_out, |
| 4269 ArrayHasHoles holes) { |
| 4270 ASSERT(!AreAliased(function_in, scratch, map_out)); |
| 4271 Label done; |
| 4272 Ldr(map_out, FieldMemOperand(function_in, |
| 4273 JSFunction::kPrototypeOrInitialMapOffset)); |
| 4274 |
| 4275 if (!FLAG_smi_only_arrays) { |
| 4276 ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS |
| 4277 : FAST_ELEMENTS; |
| 4278 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out, |
| 4279 scratch, &done); |
| 4280 } else if (holes == kArrayCanHaveHoles) { |
| 4281 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
| 4282 FAST_HOLEY_SMI_ELEMENTS, map_out, |
| 4283 scratch, &done); |
| 4284 } |
| 4285 Bind(&done); |
| 4286 } |
| 4287 |
| 4288 |
| 4289 void MacroAssembler::LoadArrayFunction(Register function) { |
| 4290 // Load the global or builtins object from the current context. |
| 4291 Ldr(function, GlobalObjectMemOperand()); |
| 4292 // Load the global context from the global or builtins object. |
| 4293 Ldr(function, |
| 4294 FieldMemOperand(function, GlobalObject::kGlobalContextOffset)); |
| 4295 // Load the array function from the native context. |
| 4296 Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX)); |
| 4297 } |
| 4298 |
| 4299 |
| 4300 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
| 4301 // Load the global or builtins object from the current context. |
| 4302 Ldr(function, GlobalObjectMemOperand()); |
| 4303 // Load the native context from the global or builtins object. |
| 4304 Ldr(function, FieldMemOperand(function, |
| 4305 GlobalObject::kNativeContextOffset)); |
| 4306 // Load the function from the native context. |
| 4307 Ldr(function, ContextMemOperand(function, index)); |
| 4308 } |
| 4309 |
| 4310 |
| 4311 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
| 4312 Register map, |
| 4313 Register scratch) { |
| 4314 // Load the initial map. The global functions all have initial maps. |
| 4315 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 4316 if (emit_debug_code()) { |
| 4317 Label ok, fail; |
| 4318 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); |
| 4319 B(&ok); |
| 4320 Bind(&fail); |
| 4321 Abort("Global function must have initial map"); |
| 4322 Bind(&ok); |
| 4323 } |
| 4324 } |
| 4325 |
| 4326 |
| 4327 // This is the main Printf implementation. All other Printf variants call |
| 4328 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. |
| 4329 void MacroAssembler::PrintfNoPreserve(const char * format, |
| 4330 const CPURegister& arg0, |
| 4331 const CPURegister& arg1, |
| 4332 const CPURegister& arg2, |
| 4333 const CPURegister& arg3) { |
| 4334 // We cannot handle a caller-saved stack pointer. It doesn't make much sense |
| 4335 // in most cases anyway, so this restriction shouldn't be too serious. |
| 4336 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); |
| 4337 |
| 4338 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro |
| 4339 // assembler. We cannot print the stack pointer because it is typically used |
| 4340 // to preserve caller-saved registers (using other Printf variants which |
| 4341 // depend on this helper). |
| 4342 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0)); |
| 4343 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1)); |
| 4344 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2)); |
| 4345 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3)); |
| 4346 |
| 4347 static const int kMaxArgCount = 4; |
| 4348 // Assume that we have the maximum number of arguments until we know |
| 4349 // otherwise. |
| 4350 int arg_count = kMaxArgCount; |
| 4351 |
| 4352 // The provided arguments. |
| 4353 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; |
| 4354 |
| 4355 // The PCS registers where the arguments need to end up. |
| 4356 CPURegister pcs[kMaxArgCount]; |
| 4357 |
| 4358 // Promote FP arguments to doubles, and integer arguments to X registers. |
| 4359 // Note that FP and integer arguments cannot be mixed, but we'll check |
| 4360 // AreSameSizeAndType once we've processed these promotions. |
| 4361 for (int i = 0; i < kMaxArgCount; i++) { |
| 4362 if (args[i].IsRegister()) { |
| 4363 // Note that we use x1 onwards, because x0 will hold the format string. |
| 4364 pcs[i] = Register::XRegFromCode(i + 1); |
| 4365 // For simplicity, we handle all integer arguments as X registers. An X |
| 4366 // register argument takes the same space as a W register argument in the |
| 4367 // PCS anyway. The only limitation is that we must explicitly clear the |
| 4368 // top word for W register arguments as the callee will expect it to be |
| 4369 // clear. |
| 4370 if (!args[i].Is64Bits()) { |
| 4371 const Register& as_x = args[i].X(); |
| 4372 And(as_x, as_x, 0x00000000ffffffff); |
| 4373 args[i] = as_x; |
| 4374 } |
| 4375 } else if (args[i].IsFPRegister()) { |
| 4376 pcs[i] = FPRegister::DRegFromCode(i); |
| 4377 // C and C++ varargs functions (such as printf) implicitly promote float |
| 4378 // arguments to doubles. |
| 4379 if (!args[i].Is64Bits()) { |
| 4380 FPRegister s(args[i]); |
| 4381 const FPRegister& as_d = args[i].D(); |
| 4382 Fcvt(as_d, s); |
| 4383 args[i] = as_d; |
| 4384 } |
| 4385 } else { |
| 4386 // This is the first empty (NoCPUReg) argument, so use it to set the |
| 4387 // argument count and bail out. |
| 4388 arg_count = i; |
| 4389 break; |
| 4390 } |
| 4391 } |
| 4392 ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount)); |
| 4393 // Check that every remaining argument is NoCPUReg. |
| 4394 for (int i = arg_count; i < kMaxArgCount; i++) { |
| 4395 ASSERT(args[i].IsNone()); |
| 4396 } |
| 4397 ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1], |
| 4398 args[2], args[3], |
| 4399 pcs[0], pcs[1], |
| 4400 pcs[2], pcs[3])); |
| 4401 |
| 4402 // Move the arguments into the appropriate PCS registers. |
| 4403 // |
| 4404 // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is |
| 4405 // surprisingly complicated. |
| 4406 // |
| 4407 // * For even numbers of registers, we push the arguments and then pop them |
| 4408 // into their final registers. This maintains 16-byte stack alignment in |
| 4409 // case csp is the stack pointer, since we're only handling X or D |
| 4410 // registers at this point. |
| 4411 // |
| 4412 // * For odd numbers of registers, we push and pop all but one register in |
| 4413 // the same way, but the left-over register is moved directly, since we |
| 4414 // can always safely move one register without clobbering any source. |
  if (arg_count >= 4) {
    Push(args[3], args[2], args[1], args[0]);
  } else if (arg_count >= 2) {
    Push(args[1], args[0]);
  }

  if ((arg_count % 2) != 0) {
    // Move the left-over register directly.
    const CPURegister& leftover_arg = args[arg_count - 1];
    const CPURegister& leftover_pcs = pcs[arg_count - 1];
    if (leftover_arg.IsRegister()) {
      Mov(Register(leftover_pcs), Register(leftover_arg));
    } else {
      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
    }
  }

  if (arg_count >= 4) {
    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
  } else if (arg_count >= 2) {
    Pop(pcs[0], pcs[1]);
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockConstPoolScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }
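  // (Editorial sketch, not in the original source.) The emitted layout is
  // therefore roughly:
  //   adr x0, format_address
  //   b after_data
  // format_address:
  //   <format string bytes emitted by EmitStringData>
  //   <Unreachable() marker>
  // after_data: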

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
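  // For instance (editorial illustration with made-up values, not from the
  // original source): if the current stack pointer holds ...7238, the Bic
  // below clears the low four bits and csp becomes ...7230.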
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(pcs[0].type());
}


void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(type);
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


void MacroAssembler::Printf(const char * format,
                            const CPURegister& arg0,
                            const CPURegister& arg1,
                            const CPURegister& arg2,
                            const CPURegister& arg3) {
  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);
  // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
  // never overlap an argument register.
  Mrs(Tmp0(), NZCV);
  Push(Tmp0(), xzr);

  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

  Pop(xzr, Tmp0());
  Msr(NZCV, Tmp0());
  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);
}
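

// Illustrative usage (editorial example, not part of the original file):
// printing two integer registers from generated code might look like
//   __ Printf("x2=0x%" PRIx64 ", x3=0x%" PRIx64 "\n", x2, x3);
// remembering that FP and integer arguments cannot be mixed in one call.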


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSizeInBytes);
  __ sub(csp, csp, 4 * kXRegSizeInBytes);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
  __ add(fp, jssp, 2 * kXRegSizeInBytes);
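  // (Editorial note, not in the original source.) After the five instructions
  // above, the new frame slots look roughly like this:
  //   jssp + 24 : lr
  //   jssp + 16 : fp        <- fp now points here
  //   jssp +  8 : cp
  //   jssp +  0 : x1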

  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
}


void MacroAssembler::EmitCodeAgeSequence(PatchingAssembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
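  //
  // (Editorial sketch, not part of the original source.) The aged sequence
  // emitted below is therefore roughly:
  //   ldr ip0, <literal holding the stub entry point>
  //   adr x0, start
  //   br  ip0
  //   <64-bit literal: stub->instruction_start()>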
  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
  }
}


bool MacroAssembler::IsYoungSequence(byte* sequence) {
  // Generate a young sequence to compare with.
  const int length = kCodeAgeSequenceSize / kInstructionSize;
  static bool initialized = false;
  static byte young[kCodeAgeSequenceSize];
  if (!initialized) {
    PatchingAssembler patcher(young, length);
    // The young sequence is the frame setup code for FUNCTION code types. It is
    // generated by FullCodeGenerator::Generate.
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
    initialized = true;
  }

  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
  ASSERT(is_young || IsCodeAgeSequence(sequence));
  return is_young;
}


#ifdef DEBUG
bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
  // The old sequence varies depending on the code age. However, the code up
  // until kCodeAgeStubEntryOffset does not change, so we can check that part to
  // get a reasonable level of verification.
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  static bool initialized = false;
  static byte old[kCodeAgeStubEntryOffset];
  if (!initialized) {
    PatchingAssembler patcher(old, length);
    MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
    initialized = true;
  }
  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
}
#endif


#undef __
#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockConstPoolScope scope(masm);
  if (reg.IsValid()) {
    ASSERT(smi_check->is_bound());
    ASSERT(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited because we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
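    //
    // Worked example (editorial illustration, not in the original source):
    // with the layout described above, recording x2 at a point five
    // instructions after the smi check emits the payload
    // RegisterBits::encode(2) | DeltaBits::encode(5), i.e. the register code
    // in the low five bits and the instruction delta above it.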
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    ASSERT(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  ASSERT(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    ASSERT(is_uint32(payload));
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      ASSERT(smi_check_delta != 0);
      smi_check_ = inline_data - (smi_check_delta * kInstructionSize);
    }
  }
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64