| OLD | NEW |
| (Empty) |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | |
| 2 // Redistribution and use in source and binary forms, with or without | |
| 3 // modification, are permitted provided that the following conditions are | |
| 4 // met: | |
| 5 // | |
| 6 // * Redistributions of source code must retain the above copyright | |
| 7 // notice, this list of conditions and the following disclaimer. | |
| 8 // * Redistributions in binary form must reproduce the above | |
| 9 // copyright notice, this list of conditions and the following | |
| 10 // disclaimer in the documentation and/or other materials provided | |
| 11 // with the distribution. | |
| 12 // * Neither the name of Google Inc. nor the names of its | |
| 13 // contributors may be used to endorse or promote products derived | |
| 14 // from this software without specific prior written permission. | |
| 15 // | |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 27 | |
| 28 #include "v8.h" | |
| 29 | |
| 30 #if V8_TARGET_ARCH_A64 | |
| 31 | |
| 32 #include "bootstrapper.h" | |
| 33 #include "codegen.h" | |
| 34 #include "cpu-profiler.h" | |
| 35 #include "debug.h" | |
| 36 #include "isolate-inl.h" | |
| 37 #include "runtime.h" | |
| 38 | |
| 39 namespace v8 { | |
| 40 namespace internal { | |
| 41 | |
| 42 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros. | |
| 43 #define __ | |
| 44 | |
| 45 | |
| 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, | |
| 47 byte * buffer, | |
| 48 unsigned buffer_size) | |
| 49 : Assembler(arg_isolate, buffer, buffer_size), | |
| 50 generating_stub_(false), | |
| 51 #if DEBUG | |
| 52 allow_macro_instructions_(true), | |
| 53 #endif | |
| 54 has_frame_(false), | |
| 55 use_real_aborts_(true), | |
| 56 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) { | |
| 57 if (isolate() != NULL) { | |
| 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | |
| 59 isolate()); | |
| 60 } | |
| 61 } | |
| 62 | |
| 63 | |
| 64 void MacroAssembler::LogicalMacro(const Register& rd, | |
| 65 const Register& rn, | |
| 66 const Operand& operand, | |
| 67 LogicalOp op) { | |
| 68 if (operand.NeedsRelocation()) { | |
| 69 LoadRelocated(Tmp0(), operand); | |
| 70 Logical(rd, rn, Tmp0(), op); | |
| 71 | |
| 72 } else if (operand.IsImmediate()) { | |
| 73 int64_t immediate = operand.immediate(); | |
| 74 unsigned reg_size = rd.SizeInBits(); | |
| 75 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | |
| 76 | |
| 77 // If the operation is NOT, invert the operation and immediate. | |
| 78 if ((op & NOT) == NOT) { | |
| 79 op = static_cast<LogicalOp>(op & ~NOT); | |
| 80 immediate = ~immediate; | |
| 81 if (rd.Is32Bits()) { | |
| 82 immediate &= kWRegMask; | |
| 83 } | |
| 84 } | |
| 85 | |
| 86 // Special cases for all set or all clear immediates. | |
| 87 if (immediate == 0) { | |
| 88 switch (op) { | |
| 89 case AND: | |
| 90 Mov(rd, 0); | |
| 91 return; | |
| 92 case ORR: // Fall through. | |
| 93 case EOR: | |
| 94 Mov(rd, rn); | |
| 95 return; | |
| 96 case ANDS: // Fall through. | |
| 97 case BICS: | |
| 98 break; | |
| 99 default: | |
| 100 UNREACHABLE(); | |
| 101 } | |
| 102 } else if ((rd.Is64Bits() && (immediate == -1L)) || | |
| 103 (rd.Is32Bits() && (immediate == 0xffffffffL))) { | |
| 104 switch (op) { | |
| 105 case AND: | |
| 106 Mov(rd, rn); | |
| 107 return; | |
| 108 case ORR: | |
| 109 Mov(rd, immediate); | |
| 110 return; | |
| 111 case EOR: | |
| 112 Mvn(rd, rn); | |
| 113 return; | |
| 114 case ANDS: // Fall through. | |
| 115 case BICS: | |
| 116 break; | |
| 117 default: | |
| 118 UNREACHABLE(); | |
| 119 } | |
| 120 } | |
| 121 | |
| 122 unsigned n, imm_s, imm_r; | |
| 123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | |
| 124 // Immediate can be encoded in the instruction. | |
| 125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | |
| 126 } else { | |
| 127 // Immediate can't be encoded: synthesize using move immediate. | |
| 128 Register temp = AppropriateTempFor(rn); | |
| 129 Mov(temp, immediate); | |
| 130 if (rd.Is(csp)) { | |
| 131 // If rd is the stack pointer, we cannot use it as the destination | |
| 132 // register, so we use the temp register as an intermediate again. | |
| 133 Logical(temp, rn, temp, op); | |
| 134 Mov(csp, temp); | |
| 135 } else { | |
| 136 Logical(rd, rn, temp, op); | |
| 137 } | |
| 138 } | |
| 139 | |
| 140 } else if (operand.IsExtendedRegister()) { | |
| 141 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | |
| 142 // Add/sub extended supports shift <= 4. We want to support exactly the | |
| 143 // same modes here. | |
| 144 ASSERT(operand.shift_amount() <= 4); | |
| 145 ASSERT(operand.reg().Is64Bits() || | |
| 146 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | |
| 147 Register temp = AppropriateTempFor(rn, operand.reg()); | |
| 148 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
| 149 operand.shift_amount()); | |
| 150 Logical(rd, rn, temp, op); | |
| 151 | |
| 152 } else { | |
| 153 // The operand can be encoded in the instruction. | |
| 154 ASSERT(operand.IsShiftedRegister()); | |
| 155 Logical(rd, rn, operand, op); | |
| 156 } | |
| 157 } | |
| 158 | |
| 159 | |
| 160 void MacroAssembler::Mov(const Register& rd, uint64_t imm) { | |
| 161 ASSERT(allow_macro_instructions_); | |
| 162 ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); | |
| 163 ASSERT(!rd.IsZero()); | |
| 164 | |
| 165 // TODO(all) extend to support more immediates. | |
| 166 // | |
| 167 // Immediates on AArch64 can be produced using an initial value, and zero to | |
| 168 // three move-keep operations. | |
| 169 // | |
| 170 // Initial values can be generated with: | |
| 171 // 1. 64-bit move zero (movz). | |
| 172 // 2. 32-bit move inverted (movn). | |
| 173 // 3. 64-bit move inverted. | |
| 174 // 4. 32-bit orr immediate. | |
| 175 // 5. 64-bit orr immediate. | |
| 176 // Move-keep may then be used to modify each of the 16-bit half-words. | |
| 177 // | |
| 178 // The code below supports all five initial value generators, and | |
| 179 // applying move-keep operations to move-zero and move-inverted initial | |
| 180 // values. | |
| 181 | |
| 182 unsigned reg_size = rd.SizeInBits(); | |
| 183 unsigned n, imm_s, imm_r; | |
| 184 if (IsImmMovz(imm, reg_size) && !rd.IsSP()) { | |
| 185 // Immediate can be represented in a move zero instruction. Movz can't | |
| 186 // write to the stack pointer. | |
| 187 movz(rd, imm); | |
| 188 } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) { | |
| 189 // Immediate can be represented in a move inverted instruction. Movn can't | |
| 190 // write to the stack pointer. | |
| 191 movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask)); | |
| 192 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { | |
| 193 // Immediate can be represented in a logical orr instruction. | |
| 194 LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR); | |
| 195 } else { | |
| 196 // Generic immediate case. Imm will be represented by | |
| 197 // [imm3, imm2, imm1, imm0], where each imm is 16 bits. | |
| 198 // A move-zero or move-inverted is generated for the first non-zero or | |
| 199 // non-0xffff immX, and a move-keep for subsequent non-zero immX. | |
| 200 | |
| 201 uint64_t ignored_halfword = 0; | |
| 202 bool invert_move = false; | |
| 203 // If the number of 0xffff halfwords is greater than the number of 0x0000 | |
| 204 // halfwords, it's more efficient to use move-inverted. | |
| 205 if (CountClearHalfWords(~imm, reg_size) > | |
| 206 CountClearHalfWords(imm, reg_size)) { | |
| 207 ignored_halfword = 0xffffL; | |
| 208 invert_move = true; | |
| 209 } | |
| 210 | |
| 211 // Mov instructions can't move a value into the stack pointer, so set up a | |
| 212 // temporary register if needed. | |
| 213 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd; | |
| 214 | |
| 215 // Iterate through the halfwords. Use movn/movz for the first non-ignored | |
| 216 // halfword, and movk for subsequent halfwords. | |
| 217 ASSERT((reg_size % 16) == 0); | |
| 218 bool first_mov_done = false; | |
| 219 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { | |
| 220 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; | |
| 221 if (imm16 != ignored_halfword) { | |
| 222 if (!first_mov_done) { | |
| 223 if (invert_move) { | |
| 224 movn(temp, (~imm16) & 0xffffL, 16 * i); | |
| 225 } else { | |
| 226 movz(temp, imm16, 16 * i); | |
| 227 } | |
| 228 first_mov_done = true; | |
| 229 } else { | |
| 230 // Construct a wider constant. | |
| 231 movk(temp, imm16, 16 * i); | |
| 232 } | |
| 233 } | |
| 234 } | |
| 235 ASSERT(first_mov_done); | |
| 236 | |
| 237 // Move the temporary if the original destination register was the stack | |
| 238 // pointer. | |
| 239 if (rd.IsSP()) { | |
| 240 mov(rd, temp); | |
| 241 } | |
| 242 } | |
| 243 } | |
| 244 | |
| 245 | |
| 246 void MacroAssembler::Mov(const Register& rd, | |
| 247 const Operand& operand, | |
| 248 DiscardMoveMode discard_mode) { | |
| 249 ASSERT(allow_macro_instructions_); | |
| 250 ASSERT(!rd.IsZero()); | |
| 251 // Provide a swap register for instructions that need to write into the | |
| 252 // system stack pointer (and can't do this inherently). | |
| 253 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd); | |
| 254 | |
| 255 if (operand.NeedsRelocation()) { | |
| 256 LoadRelocated(dst, operand); | |
| 257 | |
| 258 } else if (operand.IsImmediate()) { | |
| 259 // Call the macro assembler for generic immediates. | |
| 260 Mov(dst, operand.immediate()); | |
| 261 | |
| 262 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | |
| 263 // Emit a shift instruction if moving a shifted register. This operation | |
| 264 // could also be achieved using an orr instruction (like orn used by Mvn), | |
| 265 // but using a shift instruction makes the disassembly clearer. | |
| 266 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount()); | |
| 267 | |
| 268 } else if (operand.IsExtendedRegister()) { | |
| 269 // Emit an extend instruction if moving an extended register. This handles | |
| 270 // extend with post-shift operations, too. | |
| 271 EmitExtendShift(dst, operand.reg(), operand.extend(), | |
| 272 operand.shift_amount()); | |
| 273 | |
| 274 } else { | |
| 275 // Otherwise, emit a register move only if the registers are distinct, or | |
| 276 // if they are not X registers. | |
| 277 // | |
| 278 // Note that mov(w0, w0) is not a no-op because it clears the top word of | |
| 279 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W | |
| 280 // registers is not required to clear the top word of the X register. In | |
| 281 // this case, the instruction is discarded. | |
| 282 // | |
| 283 // If csp is an operand, add #0 is emitted, otherwise, orr #0. | |
| 284 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && | |
| 285 (discard_mode == kDontDiscardForSameWReg))) { | |
| 286 Assembler::mov(rd, operand.reg()); | |
| 287 } | |
| 288 // This case can handle writes into the system stack pointer directly. | |
| 289 dst = rd; | |
| 290 } | |
| 291 | |
| 292 // Copy the result to the system stack pointer. | |
| 293 if (!dst.Is(rd)) { | |
| 294 ASSERT(rd.IsSP()); | |
| 295 ASSERT(dst.Is(Tmp1())); | |
| 296 Assembler::mov(rd, dst); | |
| 297 } | |
| 298 } | |
| 299 | |
| 300 | |
| 301 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | |
| 302 ASSERT(allow_macro_instructions_); | |
| 303 | |
| 304 if (operand.NeedsRelocation()) { | |
| 305 LoadRelocated(Tmp0(), operand); | |
| 306 Mvn(rd, Tmp0()); | |
| 307 | |
| 308 } else if (operand.IsImmediate()) { | |
| 309 // Call the macro assembler for generic immediates. | |
| 310 Mov(rd, ~operand.immediate()); | |
| 311 | |
| 312 } else if (operand.IsExtendedRegister()) { | |
| 313 // Emit two instructions for the extend case. This differs from Mov, as | |
| 314 // the extend and invert can't be achieved in one instruction. | |
| 315 Register temp = AppropriateTempFor(rd, operand.reg()); | |
| 316 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
| 317 operand.shift_amount()); | |
| 318 mvn(rd, temp); | |
| 319 | |
| 320 } else { | |
| 321 // Otherwise, emit a register move only if the registers are distinct. | |
| 322 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0. | |
| 323 mvn(rd, operand); | |
| 324 } | |
| 325 } | |
| 326 | |
| 327 | |
| 328 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { | |
| 329 ASSERT((reg_size % 8) == 0); | |
| 330 int count = 0; | |
| 331 for (unsigned i = 0; i < (reg_size / 16); i++) { | |
| 332 if ((imm & 0xffff) == 0) { | |
| 333 count++; | |
| 334 } | |
| 335 imm >>= 16; | |
| 336 } | |
| 337 return count; | |
| 338 } | |
| 339 | |
| 340 | |
| 341 // The movz instruction can generate immediates containing an arbitrary 16-bit | |
| 342 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000. | |
| 343 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { | |
| 344 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); | |
| 345 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); | |
| 346 } | |
| 347 | |
| 348 | |
| 349 // The movn instruction can generate immediates containing an arbitrary 16-bit | |
| 350 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff. | |
| 351 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { | |
| 352 return IsImmMovz(~imm, reg_size); | |
| 353 } | |
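
The halfword logic above (CountClearHalfWords, the movz/movn predicates, and the decomposition loop in Mov) can be exercised off-target. Below is a standalone host-side sketch, not part of this file, that mirrors that logic in plain C++ and prints the sequence the macro assembler would pick for a sample 64-bit immediate; the register width and sample values are illustrative only.

```cpp
// Standalone sketch (not V8 code) of the halfword logic used by
// MacroAssembler::Mov, IsImmMovz and IsImmMovn above.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors CountClearHalfWords: number of 16-bit halfwords that are zero.
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  unsigned count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) count++;
    imm >>= 16;
  }
  return count;
}

// Mirrors IsImmMovz: at most one halfword may be non-zero.
static bool IsImmMovz(uint64_t imm, unsigned reg_size) {
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}

int main() {
  const unsigned kXRegSize = 64;
  assert(IsImmMovz(0x0000123400000000ULL, kXRegSize));   // single movz
  assert(IsImmMovz(~0xffff1234ffffffffULL, kXRegSize));  // i.e. movn-encodable

  // Generic case: pick movz or movn for the first interesting halfword,
  // then movk for the rest, exactly like the loop in Mov above.
  uint64_t imm = 0x0000123400005678ULL;
  bool invert = CountClearHalfWords(~imm, kXRegSize) >
                CountClearHalfWords(imm, kXRegSize);
  uint64_t ignored = invert ? 0xffffULL : 0;
  bool first_mov_done = false;
  for (unsigned i = 0; i < (kXRegSize / 16); i++) {
    uint64_t imm16 = (imm >> (16 * i)) & 0xffffULL;
    if (imm16 == ignored) continue;
    if (!first_mov_done) {
      std::printf("%s x0, #0x%llx, lsl #%u\n", invert ? "movn" : "movz",
                  (unsigned long long)(invert ? (~imm16 & 0xffffULL) : imm16),
                  16 * i);
      first_mov_done = true;
    } else {
      std::printf("movk x0, #0x%llx, lsl #%u\n",
                  (unsigned long long)imm16, 16 * i);
    }
  }
  // Prints:
  //   movz x0, #0x5678, lsl #0
  //   movk x0, #0x1234, lsl #32
  return 0;
}
```
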
| 354 | |
| 355 | |
| 356 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | |
| 357 const Operand& operand, | |
| 358 StatusFlags nzcv, | |
| 359 Condition cond, | |
| 360 ConditionalCompareOp op) { | |
| 361 ASSERT((cond != al) && (cond != nv)); | |
| 362 if (operand.NeedsRelocation()) { | |
| 363 LoadRelocated(Tmp0(), operand); | |
| 364 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op); | |
| 365 | |
| 366 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | |
| 367 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | |
| 368 // The immediate can be encoded in the instruction, or the operand is an | |
| 369 // unshifted register: call the assembler. | |
| 370 ConditionalCompare(rn, operand, nzcv, cond, op); | |
| 371 | |
| 372 } else { | |
| 373 // The operand isn't directly supported by the instruction: perform the | |
| 374 // operation on a temporary register. | |
| 375 Register temp = AppropriateTempFor(rn); | |
| 376 Mov(temp, operand); | |
| 377 ConditionalCompare(rn, temp, nzcv, cond, op); | |
| 378 } | |
| 379 } | |
| 380 | |
| 381 | |
| 382 void MacroAssembler::Csel(const Register& rd, | |
| 383 const Register& rn, | |
| 384 const Operand& operand, | |
| 385 Condition cond) { | |
| 386 ASSERT(allow_macro_instructions_); | |
| 387 ASSERT(!rd.IsZero()); | |
| 388 ASSERT((cond != al) && (cond != nv)); | |
| 389 if (operand.IsImmediate()) { | |
| 390 // Immediate argument. Handle special cases of 0, 1 and -1 using zero | |
| 391 // register. | |
| 392 int64_t imm = operand.immediate(); | |
| 393 Register zr = AppropriateZeroRegFor(rn); | |
| 394 if (imm == 0) { | |
| 395 csel(rd, rn, zr, cond); | |
| 396 } else if (imm == 1) { | |
| 397 csinc(rd, rn, zr, cond); | |
| 398 } else if (imm == -1) { | |
| 399 csinv(rd, rn, zr, cond); | |
| 400 } else { | |
| 401 Register temp = AppropriateTempFor(rn); | |
| 402 Mov(temp, operand.immediate()); | |
| 403 csel(rd, rn, temp, cond); | |
| 404 } | |
| 405 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { | |
| 406 // Unshifted register argument. | |
| 407 csel(rd, rn, operand.reg(), cond); | |
| 408 } else { | |
| 409 // All other arguments. | |
| 410 Register temp = AppropriateTempFor(rn); | |
| 411 Mov(temp, operand); | |
| 412 csel(rd, rn, temp, cond); | |
| 413 } | |
| 414 } | |
| 415 | |
| 416 | |
| 417 void MacroAssembler::AddSubMacro(const Register& rd, | |
| 418 const Register& rn, | |
| 419 const Operand& operand, | |
| 420 FlagsUpdate S, | |
| 421 AddSubOp op) { | |
| 422 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | |
| 423 !operand.NeedsRelocation() && (S == LeaveFlags)) { | |
| 424 // The instruction would be a nop. Avoid generating useless code. | |
| 425 return; | |
| 426 } | |
| 427 | |
| 428 if (operand.NeedsRelocation()) { | |
| 429 LoadRelocated(Tmp0(), operand); | |
| 430 AddSubMacro(rd, rn, Tmp0(), S, op); | |
| 431 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | |
| 432 (rn.IsZero() && !operand.IsShiftedRegister()) || | |
| 433 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | |
| 434 Register temp = AppropriateTempFor(rn); | |
| 435 Mov(temp, operand); | |
| 436 AddSub(rd, rn, temp, S, op); | |
| 437 } else { | |
| 438 AddSub(rd, rn, operand, S, op); | |
| 439 } | |
| 440 } | |
| 441 | |
| 442 | |
| 443 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | |
| 444 const Register& rn, | |
| 445 const Operand& operand, | |
| 446 FlagsUpdate S, | |
| 447 AddSubWithCarryOp op) { | |
| 448 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | |
| 449 | |
| 450 if (operand.NeedsRelocation()) { | |
| 451 LoadRelocated(Tmp0(), operand); | |
| 452 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op); | |
| 453 | |
| 454 } else if (operand.IsImmediate() || | |
| 455 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | |
| 456 // Add/sub with carry (immediate or ROR shifted register). | |
| 457 Register temp = AppropriateTempFor(rn); | |
| 458 Mov(temp, operand); | |
| 459 AddSubWithCarry(rd, rn, temp, S, op); | |
| 460 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | |
| 461 // Add/sub with carry (shifted register). | |
| 462 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | |
| 463 ASSERT(operand.shift() != ROR); | |
| 464 ASSERT(is_uintn(operand.shift_amount(), | |
| 465 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); | |
| 466 Register temp = AppropriateTempFor(rn, operand.reg()); | |
| 467 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); | |
| 468 AddSubWithCarry(rd, rn, temp, S, op); | |
| 469 | |
| 470 } else if (operand.IsExtendedRegister()) { | |
| 471 // Add/sub with carry (extended register). | |
| 472 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | |
| 473 // Add/sub extended supports a shift <= 4. We want to support exactly the | |
| 474 // same modes. | |
| 475 ASSERT(operand.shift_amount() <= 4); | |
| 476 ASSERT(operand.reg().Is64Bits() || | |
| 477 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | |
| 478 Register temp = AppropriateTempFor(rn, operand.reg()); | |
| 479 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
| 480 operand.shift_amount()); | |
| 481 AddSubWithCarry(rd, rn, temp, S, op); | |
| 482 | |
| 483 } else { | |
| 484 // The addressing mode is directly supported by the instruction. | |
| 485 AddSubWithCarry(rd, rn, operand, S, op); | |
| 486 } | |
| 487 } | |
| 488 | |
| 489 | |
| 490 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, | |
| 491 const MemOperand& addr, | |
| 492 LoadStoreOp op) { | |
| 493 int64_t offset = addr.offset(); | |
| 494 LSDataSize size = CalcLSDataSize(op); | |
| 495 | |
| 496 // Check if an immediate offset fits in the immediate field of the | |
| 497 // appropriate instruction. If not, emit two instructions to perform | |
| 498 // the operation. | |
| 499 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && | |
| 500 !IsImmLSUnscaled(offset)) { | |
| 501 // Immediate offset that can't be encoded using unsigned or unscaled | |
| 502 // addressing modes. | |
| 503 Register temp = AppropriateTempFor(addr.base()); | |
| 504 Mov(temp, addr.offset()); | |
| 505 LoadStore(rt, MemOperand(addr.base(), temp), op); | |
| 506 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { | |
| 507 // Post-index beyond unscaled addressing range. | |
| 508 LoadStore(rt, MemOperand(addr.base()), op); | |
| 509 add(addr.base(), addr.base(), offset); | |
| 510 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { | |
| 511 // Pre-index beyond unscaled addressing range. | |
| 512 add(addr.base(), addr.base(), offset); | |
| 513 LoadStore(rt, MemOperand(addr.base()), op); | |
| 514 } else { | |
| 515 // Encodable in one load/store instruction. | |
| 516 LoadStore(rt, addr, op); | |
| 517 } | |
| 518 } | |
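
For context, the two encodability checks consulted above correspond to the A64 load/store immediate forms: the scaled form takes an unsigned 12-bit offset measured in access-size units, and the unscaled (ldur/stur-style) form takes a signed 9-bit byte offset. The snippet below is a host-side approximation written for this note, not the assembler's own IsImmLSScaled/IsImmLSUnscaled; the helper signatures are illustrative.

```cpp
// Host-side approximation (not V8 code) of the offset checks used by
// LoadStoreMacro above. size_log2 is log2 of the access size in bytes,
// e.g. 3 for an X-register load/store.
#include <cassert>
#include <cstdint>

static bool IsImmLSUnscaled(int64_t offset) {
  return (offset >= -256) && (offset <= 255);  // signed 9-bit byte offset
}

static bool IsImmLSScaled(int64_t offset, unsigned size_log2) {
  if (offset < 0) return false;
  uint64_t scaled = static_cast<uint64_t>(offset) >> size_log2;
  bool is_size_multiple =
      (scaled << size_log2) == static_cast<uint64_t>(offset);
  return is_size_multiple && (scaled < (1u << 12));  // 12 bits once scaled
}

int main() {
  // ldr x0, [x1, #32760] fits the scaled form: 32760 / 8 = 4095 < 4096.
  assert(IsImmLSScaled(32760, 3));
  // An offset of 257 for an 8-byte access fits neither form (not a multiple
  // of 8, and outside [-256, 255]), so the macro falls back to materialising
  // the offset in a temporary register.
  assert(!IsImmLSScaled(257, 3) && !IsImmLSUnscaled(257));
  // A small negative offset is only reachable through the unscaled form.
  assert(IsImmLSUnscaled(-16) && !IsImmLSScaled(-16, 3));
  return 0;
}
```
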
| 519 | |
| 520 | |
| 521 void MacroAssembler::Load(const Register& rt, | |
| 522 const MemOperand& addr, | |
| 523 Representation r) { | |
| 524 ASSERT(!r.IsDouble()); | |
| 525 | |
| 526 if (r.IsInteger8()) { | |
| 527 Ldrsb(rt, addr); | |
| 528 } else if (r.IsUInteger8()) { | |
| 529 Ldrb(rt, addr); | |
| 530 } else if (r.IsInteger16()) { | |
| 531 Ldrsh(rt, addr); | |
| 532 } else if (r.IsUInteger16()) { | |
| 533 Ldrh(rt, addr); | |
| 534 } else if (r.IsInteger32()) { | |
| 535 Ldr(rt.W(), addr); | |
| 536 } else { | |
| 537 ASSERT(rt.Is64Bits()); | |
| 538 Ldr(rt, addr); | |
| 539 } | |
| 540 } | |
| 541 | |
| 542 | |
| 543 void MacroAssembler::Store(const Register& rt, | |
| 544 const MemOperand& addr, | |
| 545 Representation r) { | |
| 546 ASSERT(!r.IsDouble()); | |
| 547 | |
| 548 if (r.IsInteger8() || r.IsUInteger8()) { | |
| 549 Strb(rt, addr); | |
| 550 } else if (r.IsInteger16() || r.IsUInteger16()) { | |
| 551 Strh(rt, addr); | |
| 552 } else if (r.IsInteger32()) { | |
| 553 Str(rt.W(), addr); | |
| 554 } else { | |
| 555 ASSERT(rt.Is64Bits()); | |
| 556 Str(rt, addr); | |
| 557 } | |
| 558 } | |
| 559 | |
| 560 | |
| 561 bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { | |
| 562 // Account for the branch around the veneers and the guard. | |
| 563 int protection_offset = 2 * kInstructionSize; | |
| 564 return pc_offset() > max_reachable_pc - margin - protection_offset - | |
| 565 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); | |
| 566 } | |
| 567 | |
| 568 | |
| 569 void MacroAssembler::EmitVeneers(bool need_protection) { | |
| 570 RecordComment("[ Veneers"); | |
| 571 | |
| 572 Label end; | |
| 573 if (need_protection) { | |
| 574 B(&end); | |
| 575 } | |
| 576 | |
| 577 EmitVeneersGuard(); | |
| 578 | |
| 579 { | |
| 580 InstructionAccurateScope scope(this); | |
| 581 Label size_check; | |
| 582 | |
| 583 std::multimap<int, FarBranchInfo>::iterator it, it_to_delete; | |
| 584 | |
| 585 it = unresolved_branches_.begin(); | |
| 586 while (it != unresolved_branches_.end()) { | |
| 587 if (ShouldEmitVeneer(it->first)) { | |
| 588 Instruction* branch = InstructionAt(it->second.pc_offset_); | |
| 589 Label* label = it->second.label_; | |
| 590 | |
| 591 #ifdef DEBUG | |
| 592 __ bind(&size_check); | |
| 593 #endif | |
| 594 // Patch the branch to point to the current position, and emit a branch | |
| 595 // to the label. | |
| 596 Instruction* veneer = reinterpret_cast<Instruction*>(pc_); | |
| 597 RemoveBranchFromLabelLinkChain(branch, label, veneer); | |
| 598 branch->SetImmPCOffsetTarget(veneer); | |
| 599 b(label); | |
| 600 #ifdef DEBUG | |
| 601 ASSERT(SizeOfCodeGeneratedSince(&size_check) <= | |
| 602 static_cast<uint64_t>(kMaxVeneerCodeSize)); | |
| 603 size_check.Unuse(); | |
| 604 #endif | |
| 605 | |
| 606 it_to_delete = it++; | |
| 607 unresolved_branches_.erase(it_to_delete); | |
| 608 } else { | |
| 609 ++it; | |
| 610 } | |
| 611 } | |
| 612 } | |
| 613 | |
| 614 Bind(&end); | |
| 615 | |
| 616 RecordComment("]"); | |
| 617 } | |
| 618 | |
| 619 | |
| 620 void MacroAssembler::EmitVeneersGuard() { | |
| 621 if (emit_debug_code()) { | |
| 622 Unreachable(); | |
| 623 } | |
| 624 } | |
| 625 | |
| 626 | |
| 627 void MacroAssembler::CheckVeneers(bool need_protection) { | |
| 628 if (unresolved_branches_.empty()) { | |
| 629 return; | |
| 630 } | |
| 631 | |
| 632 CHECK(pc_offset() < unresolved_branches_first_limit()); | |
| 633 int margin = kVeneerDistanceMargin; | |
| 634 if (!need_protection) { | |
| 635 // Prefer emitting veneers protected by an existing instruction. | |
| 636 // The divisor of 4 is a finger-in-the-air guess. With a default margin of | |
| 637 // 2KB, that leaves 512B = 128 instructions of extra margin to avoid | |
| 638 // requiring a protective branch. | |
| 639 margin += margin / 4; | |
| 640 } | |
| 641 if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) { | |
| 642 EmitVeneers(need_protection); | |
| 643 } | |
| 644 } | |
| 645 | |
| 646 | |
| 647 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( | |
| 648 Label *label, ImmBranchType b_type) { | |
| 649 bool need_longer_range = false; | |
| 650 // There are two situations in which we care about the offset being out of | |
| 651 // range: | |
| 652 // - The label is bound but too far away. | |
| 653 // - The label is not bound but linked, and the previous branch | |
| 654 // instruction in the chain is too far away. | |
| 655 if (label->is_bound() || label->is_linked()) { | |
| 656 need_longer_range = | |
| 657 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset()); | |
| 658 } | |
| 659 if (!need_longer_range && !label->is_bound()) { | |
| 660 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type); | |
| 661 unresolved_branches_.insert( | |
| 662 std::pair<int, FarBranchInfo>(max_reachable_pc, | |
| 663 FarBranchInfo(pc_offset(), label))); | |
| 664 } | |
| 665 return need_longer_range; | |
| 666 } | |
| 667 | |
| 668 | |
| 669 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { | |
| 670 ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && | |
| 671 (bit == -1 || type >= kBranchTypeFirstUsingBit)); | |
| 672 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { | |
| 673 B(static_cast<Condition>(type), label); | |
| 674 } else { | |
| 675 switch (type) { | |
| 676 case always: B(label); break; | |
| 677 case never: break; | |
| 678 case reg_zero: Cbz(reg, label); break; | |
| 679 case reg_not_zero: Cbnz(reg, label); break; | |
| 680 case reg_bit_clear: Tbz(reg, bit, label); break; | |
| 681 case reg_bit_set: Tbnz(reg, bit, label); break; | |
| 682 default: | |
| 683 UNREACHABLE(); | |
| 684 } | |
| 685 } | |
| 686 } | |
| 687 | |
| 688 | |
| 689 void MacroAssembler::B(Label* label, Condition cond) { | |
| 690 ASSERT(allow_macro_instructions_); | |
| 691 ASSERT((cond != al) && (cond != nv)); | |
| 692 | |
| 693 Label done; | |
| 694 bool need_extra_instructions = | |
| 695 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); | |
| 696 | |
| 697 if (need_extra_instructions) { | |
| 698 b(&done, InvertCondition(cond)); | |
| 699 b(label); | |
| 700 } else { | |
| 701 b(label, cond); | |
| 702 } | |
| 703 CheckVeneers(!need_extra_instructions); | |
| 704 bind(&done); | |
| 705 } | |
| 706 | |
| 707 | |
| 708 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { | |
| 709 ASSERT(allow_macro_instructions_); | |
| 710 | |
| 711 Label done; | |
| 712 bool need_extra_instructions = | |
| 713 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); | |
| 714 | |
| 715 if (need_extra_instructions) { | |
| 716 tbz(rt, bit_pos, &done); | |
| 717 b(label); | |
| 718 } else { | |
| 719 tbnz(rt, bit_pos, label); | |
| 720 } | |
| 721 CheckVeneers(!need_extra_instructions); | |
| 722 bind(&done); | |
| 723 } | |
| 724 | |
| 725 | |
| 726 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { | |
| 727 ASSERT(allow_macro_instructions_); | |
| 728 | |
| 729 Label done; | |
| 730 bool need_extra_instructions = | |
| 731 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); | |
| 732 | |
| 733 if (need_extra_instructions) { | |
| 734 tbnz(rt, bit_pos, &done); | |
| 735 b(label); | |
| 736 } else { | |
| 737 tbz(rt, bit_pos, label); | |
| 738 } | |
| 739 CheckVeneers(!need_extra_instructions); | |
| 740 bind(&done); | |
| 741 } | |
| 742 | |
| 743 | |
| 744 void MacroAssembler::Cbnz(const Register& rt, Label* label) { | |
| 745 ASSERT(allow_macro_instructions_); | |
| 746 | |
| 747 Label done; | |
| 748 bool need_extra_instructions = | |
| 749 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); | |
| 750 | |
| 751 if (need_extra_instructions) { | |
| 752 cbz(rt, &done); | |
| 753 b(label); | |
| 754 } else { | |
| 755 cbnz(rt, label); | |
| 756 } | |
| 757 CheckVeneers(!need_extra_instructions); | |
| 758 bind(&done); | |
| 759 } | |
| 760 | |
| 761 | |
| 762 void MacroAssembler::Cbz(const Register& rt, Label* label) { | |
| 763 ASSERT(allow_macro_instructions_); | |
| 764 | |
| 765 Label done; | |
| 766 bool need_extra_instructions = | |
| 767 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); | |
| 768 | |
| 769 if (need_extra_instructions) { | |
| 770 cbnz(rt, &done); | |
| 771 b(label); | |
| 772 } else { | |
| 773 cbz(rt, label); | |
| 774 } | |
| 775 CheckVeneers(!need_extra_instructions); | |
| 776 bind(&done); | |
| 777 } | |
| 778 | |
| 779 | |
| 780 // Pseudo-instructions. | |
| 781 | |
| 782 | |
| 783 void MacroAssembler::Abs(const Register& rd, const Register& rm, | |
| 784 Label* is_not_representable, | |
| 785 Label* is_representable) { | |
| 786 ASSERT(allow_macro_instructions_); | |
| 787 ASSERT(AreSameSizeAndType(rd, rm)); | |
| 788 | |
| 789 Cmp(rm, 1); | |
| 790 Cneg(rd, rm, lt); | |
| 791 | |
| 792 // If the comparison sets the v flag, the input was the smallest value | |
| 793 // representable by rm, and the mathematical result of abs(rm) is not | |
| 794 // representable using two's complement. | |
| 795 if ((is_not_representable != NULL) && (is_representable != NULL)) { | |
| 796 B(is_not_representable, vs); | |
| 797 B(is_representable); | |
| 798 } else if (is_not_representable != NULL) { | |
| 799 B(is_not_representable, vs); | |
| 800 } else if (is_representable != NULL) { | |
| 801 B(is_representable, vc); | |
| 802 } | |
| 803 } | |
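
One detail worth spelling out: Cmp(rm, 1) computes rm - 1, and that subtraction overflows (setting the V flag) only when rm holds the most negative representable value, which is exactly the input whose absolute value has no two's-complement representation. Below is a tiny host-side check of that arithmetic fact, not V8 code, assuming the usual two's-complement int64_t.

```cpp
#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  int64_t most_negative = std::numeric_limits<int64_t>::min();
  // Negate using unsigned arithmetic to avoid signed overflow: the result
  // wraps back to the same bit pattern, so abs(INT64_MIN) is unrepresentable.
  uint64_t negated = ~static_cast<uint64_t>(most_negative) + 1;
  assert(static_cast<int64_t>(negated) == most_negative);

  // Every other value negates cleanly.
  int64_t x = most_negative + 1;
  assert(-x == std::numeric_limits<int64_t>::max());
  return 0;
}
```
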
| 804 | |
| 805 | |
| 806 // Abstracted stack operations. | |
| 807 | |
| 808 | |
| 809 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, | |
| 810 const CPURegister& src2, const CPURegister& src3) { | |
| 811 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); | |
| 812 ASSERT(src0.IsValid()); | |
| 813 | |
| 814 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); | |
| 815 int size = src0.SizeInBytes(); | |
| 816 | |
| 817 PrepareForPush(count, size); | |
| 818 PushHelper(count, size, src0, src1, src2, src3); | |
| 819 } | |
| 820 | |
| 821 | |
| 822 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, | |
| 823 const CPURegister& dst2, const CPURegister& dst3) { | |
| 824 // It is not valid to pop into the same register more than once in one | |
| 825 // instruction, not even into the zero register. | |
| 826 ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); | |
| 827 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); | |
| 828 ASSERT(dst0.IsValid()); | |
| 829 | |
| 830 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); | |
| 831 int size = dst0.SizeInBytes(); | |
| 832 | |
| 833 PrepareForPop(count, size); | |
| 834 PopHelper(count, size, dst0, dst1, dst2, dst3); | |
| 835 | |
| 836 if (!csp.Is(StackPointer()) && emit_debug_code()) { | |
| 837 // It is safe to leave csp where it is when unwinding the JavaScript stack, | |
| 838 // but if we keep it matching StackPointer, the simulator can detect memory | |
| 839 // accesses in the now-free part of the stack. | |
| 840 Mov(csp, StackPointer()); | |
| 841 } | |
| 842 } | |
| 843 | |
| 844 | |
| 845 void MacroAssembler::PushPopQueue::PushQueued() { | |
| 846 if (queued_.empty()) return; | |
| 847 | |
| 848 masm_->PrepareForPush(size_); | |
| 849 | |
| 850 int count = queued_.size(); | |
| 851 int index = 0; | |
| 852 while (index < count) { | |
| 853 // PushHelper can only handle registers with the same size and type, and it | |
| 854 // can handle only four at a time. Batch them up accordingly. | |
| 855 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | |
| 856 int batch_index = 0; | |
| 857 do { | |
| 858 batch[batch_index++] = queued_[index++]; | |
| 859 } while ((batch_index < 4) && (index < count) && | |
| 860 batch[0].IsSameSizeAndType(queued_[index])); | |
| 861 | |
| 862 masm_->PushHelper(batch_index, batch[0].SizeInBytes(), | |
| 863 batch[0], batch[1], batch[2], batch[3]); | |
| 864 } | |
| 865 | |
| 866 queued_.clear(); | |
| 867 } | |
| 868 | |
| 869 | |
| 870 void MacroAssembler::PushPopQueue::PopQueued() { | |
| 871 if (queued_.empty()) return; | |
| 872 | |
| 873 masm_->PrepareForPop(size_); | |
| 874 | |
| 875 int count = queued_.size(); | |
| 876 int index = 0; | |
| 877 while (index < count) { | |
| 878 // PopHelper can only handle registers with the same size and type, and it | |
| 879 // can handle only four at a time. Batch them up accordingly. | |
| 880 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | |
| 881 int batch_index = 0; | |
| 882 do { | |
| 883 batch[batch_index++] = queued_[index++]; | |
| 884 } while ((batch_index < 4) && (index < count) && | |
| 885 batch[0].IsSameSizeAndType(queued_[index])); | |
| 886 | |
| 887 masm_->PopHelper(batch_index, batch[0].SizeInBytes(), | |
| 888 batch[0], batch[1], batch[2], batch[3]); | |
| 889 } | |
| 890 | |
| 891 queued_.clear(); | |
| 892 } | |
| 893 | |
| 894 | |
| 895 void MacroAssembler::PushCPURegList(CPURegList registers) { | |
| 896 int size = registers.RegisterSizeInBytes(); | |
| 897 | |
| 898 PrepareForPush(registers.Count(), size); | |
| 899 // Push up to four registers at a time because if the current stack pointer is | |
| 900 // csp and reg_size is 32, registers must be pushed in blocks of four in order | |
| 901 // to maintain the 16-byte alignment for csp. | |
| 902 while (!registers.IsEmpty()) { | |
| 903 int count_before = registers.Count(); | |
| 904 const CPURegister& src0 = registers.PopHighestIndex(); | |
| 905 const CPURegister& src1 = registers.PopHighestIndex(); | |
| 906 const CPURegister& src2 = registers.PopHighestIndex(); | |
| 907 const CPURegister& src3 = registers.PopHighestIndex(); | |
| 908 int count = count_before - registers.Count(); | |
| 909 PushHelper(count, size, src0, src1, src2, src3); | |
| 910 } | |
| 911 } | |
| 912 | |
| 913 | |
| 914 void MacroAssembler::PopCPURegList(CPURegList registers) { | |
| 915 int size = registers.RegisterSizeInBytes(); | |
| 916 | |
| 917 PrepareForPop(registers.Count(), size); | |
| 918 // Pop up to four registers at a time because if the current stack pointer is | |
| 919 // csp and reg_size is 32, registers must be popped in blocks of four in | |
| 920 // order to maintain the 16-byte alignment for csp. | |
| 921 while (!registers.IsEmpty()) { | |
| 922 int count_before = registers.Count(); | |
| 923 const CPURegister& dst0 = registers.PopLowestIndex(); | |
| 924 const CPURegister& dst1 = registers.PopLowestIndex(); | |
| 925 const CPURegister& dst2 = registers.PopLowestIndex(); | |
| 926 const CPURegister& dst3 = registers.PopLowestIndex(); | |
| 927 int count = count_before - registers.Count(); | |
| 928 PopHelper(count, size, dst0, dst1, dst2, dst3); | |
| 929 } | |
| 930 | |
| 931 if (!csp.Is(StackPointer()) && emit_debug_code()) { | |
| 932 // It is safe to leave csp where it is when unwinding the JavaScript stack, | |
| 933 // but if we keep it matching StackPointer, the simulator can detect memory | |
| 934 // accesses in the now-free part of the stack. | |
| 935 Mov(csp, StackPointer()); | |
| 936 } | |
| 937 } | |
| 938 | |
| 939 | |
| 940 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | |
| 941 int size = src.SizeInBytes(); | |
| 942 | |
| 943 PrepareForPush(count, size); | |
| 944 | |
| 945 if (FLAG_optimize_for_size && count > 8) { | |
| 946 Label loop; | |
| 947 __ Mov(Tmp0(), count / 2); | |
| 948 __ Bind(&loop); | |
| 949 PushHelper(2, size, src, src, NoReg, NoReg); | |
| 950 __ Subs(Tmp0(), Tmp0(), 1); | |
| 951 __ B(ne, &loop); | |
| 952 | |
| 953 count %= 2; | |
| 954 } | |
| 955 | |
| 956 // Push up to four registers at a time if possible because if the current | |
| 957 // stack pointer is csp and the register size is 32, registers must be pushed | |
| 958 // in blocks of four in order to maintain the 16-byte alignment for csp. | |
| 959 while (count >= 4) { | |
| 960 PushHelper(4, size, src, src, src, src); | |
| 961 count -= 4; | |
| 962 } | |
| 963 if (count >= 2) { | |
| 964 PushHelper(2, size, src, src, NoReg, NoReg); | |
| 965 count -= 2; | |
| 966 } | |
| 967 if (count == 1) { | |
| 968 PushHelper(1, size, src, NoReg, NoReg, NoReg); | |
| 969 count -= 1; | |
| 970 } | |
| 971 ASSERT(count == 0); | |
| 972 } | |
| 973 | |
| 974 | |
| 975 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | |
| 976 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | |
| 977 | |
| 978 Register temp = AppropriateTempFor(count); | |
| 979 | |
| 980 if (FLAG_optimize_for_size) { | |
| 981 Label loop, done; | |
| 982 | |
| 983 Subs(temp, count, 1); | |
| 984 B(mi, &done); | |
| 985 | |
| 986 // Push all registers individually, to save code size. | |
| 987 Bind(&loop); | |
| 988 Subs(temp, temp, 1); | |
| 989 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg); | |
| 990 B(pl, &loop); | |
| 991 | |
| 992 Bind(&done); | |
| 993 } else { | |
| 994 Label loop, leftover2, leftover1, done; | |
| 995 | |
| 996 Subs(temp, count, 4); | |
| 997 B(mi, &leftover2); | |
| 998 | |
| 999 // Push groups of four first. | |
| 1000 Bind(&loop); | |
| 1001 Subs(temp, temp, 4); | |
| 1002 PushHelper(4, src.SizeInBytes(), src, src, src, src); | |
| 1003 B(pl, &loop); | |
| 1004 | |
| 1005 // Push groups of two. | |
| 1006 Bind(&leftover2); | |
| 1007 Tbz(count, 1, &leftover1); | |
| 1008 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg); | |
| 1009 | |
| 1010 // Push the last one (if required). | |
| 1011 Bind(&leftover1); | |
| 1012 Tbz(count, 0, &done); | |
| 1013 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg); | |
| 1014 | |
| 1015 Bind(&done); | |
| 1016 } | |
| 1017 } | |
| 1018 | |
| 1019 | |
| 1020 void MacroAssembler::PushHelper(int count, int size, | |
| 1021 const CPURegister& src0, | |
| 1022 const CPURegister& src1, | |
| 1023 const CPURegister& src2, | |
| 1024 const CPURegister& src3) { | |
| 1025 // Ensure that we don't unintentionally modify scratch or debug registers. | |
| 1026 InstructionAccurateScope scope(this); | |
| 1027 | |
| 1028 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); | |
| 1029 ASSERT(size == src0.SizeInBytes()); | |
| 1030 | |
| 1031 // When pushing multiple registers, the store order is chosen such that | |
| 1032 // Push(a, b) is equivalent to Push(a) followed by Push(b). | |
| 1033 switch (count) { | |
| 1034 case 1: | |
| 1035 ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); | |
| 1036 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); | |
| 1037 break; | |
| 1038 case 2: | |
| 1039 ASSERT(src2.IsNone() && src3.IsNone()); | |
| 1040 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); | |
| 1041 break; | |
| 1042 case 3: | |
| 1043 ASSERT(src3.IsNone()); | |
| 1044 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); | |
| 1045 str(src0, MemOperand(StackPointer(), 2 * size)); | |
| 1046 break; | |
| 1047 case 4: | |
| 1048 // Skip over 4 * size, then fill in the gap. This allows four W registers | |
| 1049 // to be pushed using csp, whilst maintaining 16-byte alignment for csp | |
| 1050 // at all times. | |
| 1051 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex)); | |
| 1052 stp(src1, src0, MemOperand(StackPointer(), 2 * size)); | |
| 1053 break; | |
| 1054 default: | |
| 1055 UNREACHABLE(); | |
| 1056 } | |
| 1057 } | |
| 1058 | |
| 1059 | |
| 1060 void MacroAssembler::PopHelper(int count, int size, | |
| 1061 const CPURegister& dst0, | |
| 1062 const CPURegister& dst1, | |
| 1063 const CPURegister& dst2, | |
| 1064 const CPURegister& dst3) { | |
| 1065 // Ensure that we don't unintentionally modify scratch or debug registers. | |
| 1066 InstructionAccurateScope scope(this); | |
| 1067 | |
| 1068 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); | |
| 1069 ASSERT(size == dst0.SizeInBytes()); | |
| 1070 | |
| 1071 // When popping multiple registers, the load order is chosen such that | |
| 1072 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). | |
| 1073 switch (count) { | |
| 1074 case 1: | |
| 1075 ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); | |
| 1076 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); | |
| 1077 break; | |
| 1078 case 2: | |
| 1079 ASSERT(dst2.IsNone() && dst3.IsNone()); | |
| 1080 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); | |
| 1081 break; | |
| 1082 case 3: | |
| 1083 ASSERT(dst3.IsNone()); | |
| 1084 ldr(dst2, MemOperand(StackPointer(), 2 * size)); | |
| 1085 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); | |
| 1086 break; | |
| 1087 case 4: | |
| 1088 // Load the higher addresses first, then load the lower addresses and | |
| 1089 // skip the whole block in the second instruction. This allows four W | |
| 1090 // registers to be popped using csp, whilst maintaining 16-byte alignment | |
| 1091 // for csp at all times. | |
| 1092 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); | |
| 1093 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); | |
| 1094 break; | |
| 1095 default: | |
| 1096 UNREACHABLE(); | |
| 1097 } | |
| 1098 } | |
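
The ordering contract in the two helpers above (Push(a, b) behaves like Push(a) followed by Push(b), and Pop is its mirror) is easy to model off-target. The sketch below, written for this note rather than taken from V8, simulates the count == 4 store pattern on a descending stack and checks that src0 lands at the highest address while the stack pointer moves in one 4-slot pre-indexed step, which is what preserves 16-byte alignment when pushing through csp.

```cpp
// Standalone model (not V8 code) of PushHelper's count == 4 case.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

struct FakeStack {
  int64_t sp = 0;                      // Descending stack; starts at 0.
  std::map<int64_t, std::string> mem;  // Slot address -> register name.

  // stp reg_lo, reg_hi, [sp, #-pre_slots * size]!  (pre-indexed)
  void stp_pre(const std::string& lo, const std::string& hi,
               int size, int pre_slots) {
    sp -= pre_slots * size;
    mem[sp] = lo;
    mem[sp + size] = hi;
  }
  // stp reg_lo, reg_hi, [sp, #offset]  (unsigned offset, sp unchanged)
  void stp_off(const std::string& lo, const std::string& hi,
               int size, int offset) {
    mem[sp + offset] = lo;
    mem[sp + offset + size] = hi;
  }
};

int main() {
  const int size = 8;  // X registers.
  FakeStack s;

  // Mirrors PushHelper(4, size, src0, src1, src2, src3):
  //   stp src3, src2, [sp, #-4 * size]!
  //   stp src1, src0, [sp, #2 * size]
  s.stp_pre("src3", "src2", size, 4);
  s.stp_off("src1", "src0", size, 2 * size);

  // src0 ends up at the highest address and src3 at the lowest, exactly as
  // if Push(src0); Push(src1); Push(src2); Push(src3) had been emitted, and
  // sp moved by a single 32-byte (16-byte aligned) adjustment.
  assert(s.mem[s.sp + 3 * size] == "src0");
  assert(s.mem[s.sp + 2 * size] == "src1");
  assert(s.mem[s.sp + 1 * size] == "src2");
  assert(s.mem[s.sp + 0 * size] == "src3");
  assert(s.sp == -4 * size);
  return 0;
}
```
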
| 1099 | |
| 1100 | |
| 1101 void MacroAssembler::PrepareForPush(Operand total_size) { | |
| 1102 // TODO(jbramley): This assertion generates too much code in some debug tests. | |
| 1103 // AssertStackConsistency(); | |
| 1104 if (csp.Is(StackPointer())) { | |
| 1105 // If the current stack pointer is csp, then it must be aligned to 16 bytes | |
| 1106 // on entry and the total size of the specified registers must also be a | |
| 1107 // multiple of 16 bytes. | |
| 1108 if (total_size.IsImmediate()) { | |
| 1109 ASSERT((total_size.immediate() % 16) == 0); | |
| 1110 } | |
| 1111 | |
| 1112 // Don't check access size for non-immediate sizes. It's difficult to do | |
| 1113 // well, and it will be caught by hardware (or the simulator) anyway. | |
| 1114 } else { | |
| 1115 // Even if the current stack pointer is not the system stack pointer (csp), | |
| 1116 // the system stack pointer will still be modified in order to comply with | |
| 1117 // ABI rules about accessing memory below the system stack pointer. | |
| 1118 BumpSystemStackPointer(total_size); | |
| 1119 } | |
| 1120 } | |
| 1121 | |
| 1122 | |
| 1123 void MacroAssembler::PrepareForPop(Operand total_size) { | |
| 1124 AssertStackConsistency(); | |
| 1125 if (csp.Is(StackPointer())) { | |
| 1126 // If the current stack pointer is csp, then it must be aligned to 16 bytes | |
| 1127 // on entry and the total size of the specified registers must also be a | |
| 1128 // multiple of 16 bytes. | |
| 1129 if (total_size.IsImmediate()) { | |
| 1130 ASSERT((total_size.immediate() % 16) == 0); | |
| 1131 } | |
| 1132 | |
| 1133 // Don't check access size for non-immediate sizes. It's difficult to do | |
| 1134 // well, and it will be caught by hardware (or the simulator) anyway. | |
| 1135 } | |
| 1136 } | |
| 1137 | |
| 1138 | |
| 1139 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { | |
| 1140 if (offset.IsImmediate()) { | |
| 1141 ASSERT(offset.immediate() >= 0); | |
| 1142 } else if (emit_debug_code()) { | |
| 1143 Cmp(xzr, offset); | |
| 1144 Check(le, kStackAccessBelowStackPointer); | |
| 1145 } | |
| 1146 | |
| 1147 Str(src, MemOperand(StackPointer(), offset)); | |
| 1148 } | |
| 1149 | |
| 1150 | |
| 1151 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { | |
| 1152 if (offset.IsImmediate()) { | |
| 1153 ASSERT(offset.immediate() >= 0); | |
| 1154 } else if (emit_debug_code()) { | |
| 1155 Cmp(xzr, offset); | |
| 1156 Check(le, kStackAccessBelowStackPointer); | |
| 1157 } | |
| 1158 | |
| 1159 Ldr(dst, MemOperand(StackPointer(), offset)); | |
| 1160 } | |
| 1161 | |
| 1162 | |
| 1163 void MacroAssembler::PokePair(const CPURegister& src1, | |
| 1164 const CPURegister& src2, | |
| 1165 int offset) { | |
| 1166 ASSERT(AreSameSizeAndType(src1, src2)); | |
| 1167 ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); | |
| 1168 Stp(src1, src2, MemOperand(StackPointer(), offset)); | |
| 1169 } | |
| 1170 | |
| 1171 | |
| 1172 void MacroAssembler::PeekPair(const CPURegister& dst1, | |
| 1173 const CPURegister& dst2, | |
| 1174 int offset) { | |
| 1175 ASSERT(AreSameSizeAndType(dst1, dst2)); | |
| 1176 ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0)); | |
| 1177 Ldp(dst1, dst2, MemOperand(StackPointer(), offset)); | |
| 1178 } | |
| 1179 | |
| 1180 | |
| 1181 void MacroAssembler::PushCalleeSavedRegisters() { | |
| 1182 // Ensure that the macro-assembler doesn't use any scratch registers. | |
| 1183 InstructionAccurateScope scope(this); | |
| 1184 | |
| 1185 // This method must not be called unless the current stack pointer is the | |
| 1186 // system stack pointer (csp). | |
| 1187 ASSERT(csp.Is(StackPointer())); | |
| 1188 | |
| 1189 MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex); | |
| 1190 | |
| 1191 stp(d14, d15, tos); | |
| 1192 stp(d12, d13, tos); | |
| 1193 stp(d10, d11, tos); | |
| 1194 stp(d8, d9, tos); | |
| 1195 | |
| 1196 stp(x29, x30, tos); | |
| 1197 stp(x27, x28, tos); // x28 = jssp | |
| 1198 stp(x25, x26, tos); | |
| 1199 stp(x23, x24, tos); | |
| 1200 stp(x21, x22, tos); | |
| 1201 stp(x19, x20, tos); | |
| 1202 } | |
| 1203 | |
| 1204 | |
| 1205 void MacroAssembler::PopCalleeSavedRegisters() { | |
| 1206 // Ensure that the macro-assembler doesn't use any scratch registers. | |
| 1207 InstructionAccurateScope scope(this); | |
| 1208 | |
| 1209 // This method must not be called unless the current stack pointer is the | |
| 1210 // system stack pointer (csp). | |
| 1211 ASSERT(csp.Is(StackPointer())); | |
| 1212 | |
| 1213 MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex); | |
| 1214 | |
| 1215 ldp(x19, x20, tos); | |
| 1216 ldp(x21, x22, tos); | |
| 1217 ldp(x23, x24, tos); | |
| 1218 ldp(x25, x26, tos); | |
| 1219 ldp(x27, x28, tos); // x28 = jssp | |
| 1220 ldp(x29, x30, tos); | |
| 1221 | |
| 1222 ldp(d8, d9, tos); | |
| 1223 ldp(d10, d11, tos); | |
| 1224 ldp(d12, d13, tos); | |
| 1225 ldp(d14, d15, tos); | |
| 1226 } | |
| 1227 | |
| 1228 | |
| 1229 void MacroAssembler::AssertStackConsistency() { | |
| 1230 if (emit_debug_code()) { | |
| 1231 if (csp.Is(StackPointer())) { | |
| 1232 // We can't check the alignment of csp without using a scratch register | |
| 1233 // (or clobbering the flags), but the processor (or simulator) will abort | |
| 1234 // if it is not properly aligned during a load. | |
| 1235 ldr(xzr, MemOperand(csp, 0)); | |
| 1236 } else if (FLAG_enable_slow_asserts) { | |
| 1237 Label ok; | |
| 1238 // Check that csp <= StackPointer(), preserving all registers and NZCV. | |
| 1239 sub(StackPointer(), csp, StackPointer()); | |
| 1240 cbz(StackPointer(), &ok); // Ok if csp == StackPointer(). | |
| 1241 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer(). | |
| 1242 | |
| 1243 Abort(kTheCurrentStackPointerIsBelowCsp); | |
| 1244 | |
| 1245 bind(&ok); | |
| 1246 // Restore StackPointer(). | |
| 1247 sub(StackPointer(), csp, StackPointer()); | |
| 1248 } | |
| 1249 } | |
| 1250 } | |
| 1251 | |
| 1252 | |
| 1253 void MacroAssembler::LoadRoot(Register destination, | |
| 1254 Heap::RootListIndex index) { | |
| 1255 // TODO(jbramley): Most root values are constants, and can be synthesized | |
| 1256 // without a load. Refer to the ARM back end for details. | |
| 1257 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); | |
| 1258 } | |
| 1259 | |
| 1260 | |
| 1261 void MacroAssembler::StoreRoot(Register source, | |
| 1262 Heap::RootListIndex index) { | |
| 1263 Str(source, MemOperand(root, index << kPointerSizeLog2)); | |
| 1264 } | |
| 1265 | |
| 1266 | |
| 1267 void MacroAssembler::LoadTrueFalseRoots(Register true_root, | |
| 1268 Register false_root) { | |
| 1269 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex); | |
| 1270 Ldp(true_root, false_root, | |
| 1271 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2)); | |
| 1272 } | |
| 1273 | |
| 1274 | |
| 1275 void MacroAssembler::LoadHeapObject(Register result, | |
| 1276 Handle<HeapObject> object) { | |
| 1277 AllowDeferredHandleDereference using_raw_address; | |
| 1278 if (isolate()->heap()->InNewSpace(*object)) { | |
| 1279 Handle<Cell> cell = isolate()->factory()->NewCell(object); | |
| 1280 Mov(result, Operand(cell)); | |
| 1281 Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); | |
| 1282 } else { | |
| 1283 Mov(result, Operand(object)); | |
| 1284 } | |
| 1285 } | |
| 1286 | |
| 1287 | |
| 1288 void MacroAssembler::LoadInstanceDescriptors(Register map, | |
| 1289 Register descriptors) { | |
| 1290 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | |
| 1291 } | |
| 1292 | |
| 1293 | |
| 1294 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | |
| 1295 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | |
| 1296 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | |
| 1297 } | |
| 1298 | |
| 1299 | |
| 1300 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) { | |
| 1301 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | |
| 1302 Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset)); | |
| 1303 And(dst, dst, Map::EnumLengthBits::kMask); | |
| 1304 } | |
| 1305 | |
| 1306 | |
| 1307 void MacroAssembler::EnumLengthSmi(Register dst, Register map) { | |
| 1308 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | |
| 1309 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | |
| 1310 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask))); | |
| 1311 } | |
| 1312 | |
| 1313 | |
| 1314 void MacroAssembler::CheckEnumCache(Register object, | |
| 1315 Register null_value, | |
| 1316 Register scratch0, | |
| 1317 Register scratch1, | |
| 1318 Register scratch2, | |
| 1319 Register scratch3, | |
| 1320 Label* call_runtime) { | |
| 1321 ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2, | |
| 1322 scratch3)); | |
| 1323 | |
| 1324 Register empty_fixed_array_value = scratch0; | |
| 1325 Register current_object = scratch1; | |
| 1326 | |
| 1327 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | |
| 1328 Label next, start; | |
| 1329 | |
| 1330 Mov(current_object, object); | |
| 1331 | |
| 1332 // Check if the enum length field is properly initialized, indicating that | |
| 1333 // there is an enum cache. | |
| 1334 Register map = scratch2; | |
| 1335 Register enum_length = scratch3; | |
| 1336 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset)); | |
| 1337 | |
| 1338 EnumLengthUntagged(enum_length, map); | |
| 1339 Cmp(enum_length, kInvalidEnumCacheSentinel); | |
| 1340 B(eq, call_runtime); | |
| 1341 | |
| 1342 B(&start); | |
| 1343 | |
| 1344 Bind(&next); | |
| 1345 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset)); | |
| 1346 | |
| 1347 // For all objects but the receiver, check that the cache is empty. | |
| 1348 EnumLengthUntagged(enum_length, map); | |
| 1349 Cbnz(enum_length, call_runtime); | |
| 1350 | |
| 1351 Bind(&start); | |
| 1352 | |
| 1353 // Check that there are no elements. Register current_object contains the | |
| 1354 // current JS object we've reached through the prototype chain. | |
| 1355 Label no_elements; | |
| 1356 Ldr(current_object, FieldMemOperand(current_object, | |
| 1357 JSObject::kElementsOffset)); | |
| 1358 Cmp(current_object, empty_fixed_array_value); | |
| 1359 B(eq, &no_elements); | |
| 1360 | |
| 1361 // Second chance, the object may be using the empty slow element dictionary. | |
| 1362 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex); | |
| 1363 B(ne, call_runtime); | |
| 1364 | |
| 1365 Bind(&no_elements); | |
| 1366 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset)); | |
| 1367 Cmp(current_object, null_value); | |
| 1368 B(ne, &next); | |
| 1369 } | |
| 1370 | |
| 1371 | |
| 1372 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver, | |
| 1373 Register scratch1, | |
| 1374 Register scratch2, | |
| 1375 Label* no_memento_found) { | |
| 1376 ExternalReference new_space_start = | |
| 1377 ExternalReference::new_space_start(isolate()); | |
| 1378 ExternalReference new_space_allocation_top = | |
| 1379 ExternalReference::new_space_allocation_top_address(isolate()); | |
| 1380 | |
| 1381 Add(scratch1, receiver, | |
| 1382 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag); | |
| 1383 Cmp(scratch1, Operand(new_space_start)); | |
| 1384 B(lt, no_memento_found); | |
| 1385 | |
| 1386 Mov(scratch2, Operand(new_space_allocation_top)); | |
| 1387 Ldr(scratch2, MemOperand(scratch2)); | |
| 1388 Cmp(scratch1, scratch2); | |
| 1389 B(gt, no_memento_found); | |
| 1390 | |
| 1391 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize)); | |
| 1392 Cmp(scratch1, | |
| 1393 Operand(isolate()->factory()->allocation_memento_map())); | |
| 1394 } | |
| 1395 | |
| 1396 | |
| 1397 void MacroAssembler::JumpToHandlerEntry(Register exception, | |
| 1398 Register object, | |
| 1399 Register state, | |
| 1400 Register scratch1, | |
| 1401 Register scratch2) { | |
| 1402 // Handler expects argument in x0. | |
| 1403 ASSERT(exception.Is(x0)); | |
| 1404 | |
| 1405 // Compute the handler entry address and jump to it. The handler table is | |
| 1406 // a fixed array of (smi-tagged) code offsets. | |
| 1407 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset)); | |
| 1408 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 1409 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2); | |
| 1410 Lsr(scratch2, state, StackHandler::kKindWidth); | |
| 1411 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); | |
| 1412 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); | |
| 1413 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); | |
| 1414 Br(scratch1); | |
| 1415 } | |
| 1416 | |
| 1417 | |
| 1418 void MacroAssembler::InNewSpace(Register object, | |
| 1419 Condition cond, | |
| 1420 Label* branch) { | |
| 1421 ASSERT(cond == eq || cond == ne); | |
| 1422 // Use Tmp1() to have a different destination register, as Tmp0() will be used | |
| 1423 // for relocation. | |
| 1424 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate()))); | |
| 1425 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate()))); | |
| 1426 B(cond, branch); | |
| 1427 } | |
| 1428 | |
| 1429 | |
| 1430 void MacroAssembler::Throw(Register value, | |
| 1431 Register scratch1, | |
| 1432 Register scratch2, | |
| 1433 Register scratch3, | |
| 1434 Register scratch4) { | |
| 1435 // Adjust this code if not the case. | |
| 1436 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
| 1437 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 1438 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
| 1439 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
| 1440 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
| 1441 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
| 1442 | |
| 1443 // The handler expects the exception in x0. | |
| 1444 ASSERT(value.Is(x0)); | |
| 1445 | |
| 1446 // Drop the stack pointer to the top of the top handler. | |
| 1447 ASSERT(jssp.Is(StackPointer())); | |
| 1448 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, | |
| 1449 isolate()))); | |
| 1450 Ldr(jssp, MemOperand(scratch1)); | |
| 1451 // Restore the next handler. | |
| 1452 Pop(scratch2); | |
| 1453 Str(scratch2, MemOperand(scratch1)); | |
| 1454 | |
| 1455 // Get the code object and state. Restore the context and frame pointer. | |
| 1456 Register object = scratch1; | |
| 1457 Register state = scratch2; | |
| 1458 Pop(object, state, cp, fp); | |
| 1459 | |
| 1460 // If the handler is a JS frame, restore the context to the frame. | |
| 1461 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp | |
| 1462 // or cp. | |
| 1463 Label not_js_frame; | |
| 1464 Cbz(cp, ¬_js_frame); | |
| 1465 Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
| 1466 Bind(¬_js_frame); | |
| 1467 | |
| 1468 JumpToHandlerEntry(value, object, state, scratch3, scratch4); | |
| 1469 } | |
| 1470 | |
| 1471 | |
| 1472 void MacroAssembler::ThrowUncatchable(Register value, | |
| 1473 Register scratch1, | |
| 1474 Register scratch2, | |
| 1475 Register scratch3, | |
| 1476 Register scratch4) { | |
| 1477 // Adjust this code if not the case. | |
| 1478 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
| 1479 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | |
| 1480 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
| 1481 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
| 1482 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
| 1483 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
| 1484 | |
| 1485 // The handler expects the exception in x0. | |
| 1486 ASSERT(value.Is(x0)); | |
| 1487 | |
| 1488 // Drop the stack pointer to the top of the top stack handler. | |
| 1489 ASSERT(jssp.Is(StackPointer())); | |
| 1490 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, | |
| 1491 isolate()))); | |
| 1492 Ldr(jssp, MemOperand(scratch1)); | |
| 1493 | |
| 1494 // Unwind the handlers until the ENTRY handler is found. | |
| 1495 Label fetch_next, check_kind; | |
| 1496 B(&check_kind); | |
| 1497 Bind(&fetch_next); | |
| 1498 Peek(jssp, StackHandlerConstants::kNextOffset); | |
| 1499 | |
| 1500 Bind(&check_kind); | |
| 1501 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); | |
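|        // Since JS_ENTRY is zero, any set bit in the handler's kind field means | |
|        // this is not an ENTRY handler, so keep following the next links. | |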
| 1502 Peek(scratch2, StackHandlerConstants::kStateOffset); | |
| 1503 TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next); | |
| 1504 | |
| 1505 // Set the top handler address to next handler past the top ENTRY handler. | |
| 1506 Pop(scratch2); | |
| 1507 Str(scratch2, MemOperand(scratch1)); | |
| 1508 | |
| 1509 // Get the code object and state. Clear the context and frame pointer (0 was | |
| 1510 // saved in the handler). | |
| 1511 Register object = scratch1; | |
| 1512 Register state = scratch2; | |
| 1513 Pop(object, state, cp, fp); | |
| 1514 | |
| 1515 JumpToHandlerEntry(value, object, state, scratch3, scratch4); | |
| 1516 } | |
| 1517 | |
| 1518 | |
| 1519 void MacroAssembler::Throw(BailoutReason reason) { | |
| 1520 Label throw_start; | |
| 1521 Bind(&throw_start); | |
| 1522 #ifdef DEBUG | |
| 1523 const char* msg = GetBailoutReason(reason); | |
| 1524 RecordComment("Throw message: "); | |
| 1525 RecordComment((msg != NULL) ? msg : "UNKNOWN"); | |
| 1526 #endif | |
| 1527 | |
| 1528 Mov(x0, Operand(Smi::FromInt(reason))); | |
| 1529 Push(x0); | |
| 1530 | |
| 1531 // Disable stub call restrictions to always allow calls to throw. | |
| 1532 if (!has_frame_) { | |
| 1533 // We don't actually want to generate a pile of code for this, so just | |
| 1534 // claim there is a stack frame, without generating one. | |
| 1535 FrameScope scope(this, StackFrame::NONE); | |
| 1536 CallRuntime(Runtime::kThrowMessage, 1); | |
| 1537 } else { | |
| 1538 CallRuntime(Runtime::kThrowMessage, 1); | |
| 1539 } | |
| 1540 // ThrowMessage should not return here. | |
| 1541 Unreachable(); | |
| 1542 } | |
| 1543 | |
| 1544 | |
| 1545 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { | |
| 1546 Label ok; | |
| 1547 B(InvertCondition(cc), &ok); | |
| 1548 Throw(reason); | |
| 1549 Bind(&ok); | |
| 1550 } | |
| 1551 | |
| 1552 | |
| 1553 void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) { | |
| 1554 Label ok; | |
| 1555 JumpIfNotSmi(value, &ok); | |
| 1556 Throw(reason); | |
| 1557 Bind(&ok); | |
| 1558 } | |
| 1559 | |
| 1560 | |
| 1561 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) { | |
| 1562 ASSERT(smi.Is64Bits()); | |
| 1563 Abs(smi, smi, slow); | |
| 1564 } | |
| 1565 | |
| 1566 | |
| 1567 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) { | |
| 1568 if (emit_debug_code()) { | |
| 1569 STATIC_ASSERT(kSmiTag == 0); | |
| 1570 Tst(object, kSmiTagMask); | |
| 1571 Check(eq, reason); | |
| 1572 } | |
| 1573 } | |
| 1574 | |
| 1575 | |
| 1576 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) { | |
| 1577 if (emit_debug_code()) { | |
| 1578 STATIC_ASSERT(kSmiTag == 0); | |
| 1579 Tst(object, kSmiTagMask); | |
| 1580 Check(ne, reason); | |
| 1581 } | |
| 1582 } | |
| 1583 | |
| 1584 | |
| 1585 void MacroAssembler::AssertName(Register object) { | |
| 1586 if (emit_debug_code()) { | |
| 1587 STATIC_ASSERT(kSmiTag == 0); | |
| 1588 // TODO(jbramley): Add AbortIfSmi and related functions. | |
| 1589 Label not_smi; | |
| 1590 JumpIfNotSmi(object, ¬_smi); | |
| 1591 Abort(kOperandIsASmiAndNotAName); | |
| 1592 Bind(¬_smi); | |
| 1593 | |
| 1594 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 1595 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE); | |
| 1596 Check(ls, kOperandIsNotAName); | |
| 1597 } | |
| 1598 } | |
| 1599 | |
| 1600 | |
| 1601 void MacroAssembler::AssertString(Register object) { | |
| 1602 if (emit_debug_code()) { | |
| 1603 Register temp = Tmp1(); | |
| 1604 STATIC_ASSERT(kSmiTag == 0); | |
| 1605 Tst(object, kSmiTagMask); | |
| 1606 Check(ne, kOperandIsASmiAndNotAString); | |
| 1607 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 1608 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | |
| 1609 Check(lo, kOperandIsNotAString); | |
| 1610 } | |
| 1611 } | |
| 1612 | |
| 1613 | |
| 1614 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { | |
| 1615 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. | |
| 1616 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); | |
| 1617 } | |
| 1618 | |
| 1619 | |
| 1620 void MacroAssembler::TailCallStub(CodeStub* stub) { | |
| 1621 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); | |
| 1622 } | |
| 1623 | |
| 1624 | |
| 1625 void MacroAssembler::CallRuntime(const Runtime::Function* f, | |
| 1626 int num_arguments, | |
| 1627 SaveFPRegsMode save_doubles) { | |
| 1628 // All arguments must be on the stack before this function is called. | |
| 1629 // x0 holds the return value after the call. | |
| 1630 | |
| 1631 // Check that the number of arguments matches what the function expects. | |
| 1632 // If f->nargs is -1, the function can accept a variable number of arguments. | |
| 1633 if (f->nargs >= 0 && f->nargs != num_arguments) { | |
| 1634 // Illegal operation: drop the stack arguments and return undefined. | |
| 1635 if (num_arguments > 0) { | |
| 1636 Drop(num_arguments); | |
| 1637 } | |
| 1638 LoadRoot(x0, Heap::kUndefinedValueRootIndex); | |
| 1639 return; | |
| 1640 } | |
| 1641 | |
| 1642 // Place the necessary arguments. | |
| 1643 Mov(x0, num_arguments); | |
| 1644 Mov(x1, Operand(ExternalReference(f, isolate()))); | |
| 1645 | |
| 1646 CEntryStub stub(1, save_doubles); | |
| 1647 CallStub(&stub); | |
| 1648 } | |
| 1649 | |
| 1650 | |
| 1651 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | |
| 1652 return ref0.address() - ref1.address(); | |
| 1653 } | |
| 1654 | |
| 1655 | |
| 1656 void MacroAssembler::CallApiFunctionAndReturn( | |
| 1657 Register function_address, | |
| 1658 ExternalReference thunk_ref, | |
| 1659 int stack_space, | |
| 1660 int spill_offset, | |
| 1661 MemOperand return_value_operand, | |
| 1662 MemOperand* context_restore_operand) { | |
| 1663 ASM_LOCATION("CallApiFunctionAndReturn"); | |
| 1664 ExternalReference next_address = | |
| 1665 ExternalReference::handle_scope_next_address(isolate()); | |
| 1666 const int kNextOffset = 0; | |
| 1667 const int kLimitOffset = AddressOffset( | |
| 1668 ExternalReference::handle_scope_limit_address(isolate()), | |
| 1669 next_address); | |
| 1670 const int kLevelOffset = AddressOffset( | |
| 1671 ExternalReference::handle_scope_level_address(isolate()), | |
| 1672 next_address); | |
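|        // kLimitOffset and kLevelOffset are byte offsets relative to | |
|        // next_address, so all three HandleScope fields can be addressed from | |
|        // the single base register (handle_scope_base) loaded below. | |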
| 1673 | |
| 1674 ASSERT(function_address.is(x1) || function_address.is(x2)); | |
| 1675 | |
| 1676 Label profiler_disabled; | |
| 1677 Label end_profiler_check; | |
| 1678 bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address(); | |
| 1679 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); | |
| 1680 Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag)); | |
| 1681 Ldrb(w10, MemOperand(x10)); | |
| 1682 Cbz(w10, &profiler_disabled); | |
| 1683 Mov(x3, Operand(thunk_ref)); | |
| 1684 B(&end_profiler_check); | |
| 1685 | |
| 1686 Bind(&profiler_disabled); | |
| 1687 Mov(x3, function_address); | |
| 1688 Bind(&end_profiler_check); | |
| 1689 | |
| 1690 // Save the callee-save registers we are going to use. | |
| 1691 // TODO(all): Is this necessary? ARM doesn't do it. | |
| 1692 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); | |
| 1693 Poke(x19, (spill_offset + 0) * kXRegSizeInBytes); | |
| 1694 Poke(x20, (spill_offset + 1) * kXRegSizeInBytes); | |
| 1695 Poke(x21, (spill_offset + 2) * kXRegSizeInBytes); | |
| 1696 Poke(x22, (spill_offset + 3) * kXRegSizeInBytes); | |
| 1697 | |
| 1698 // Allocate HandleScope in callee-save registers. | |
| 1699   // We will need to restore the HandleScope after the call to the API function, | |
| 1700   // so it is kept in callee-save registers, which are preserved by the C code. | |
| 1701 Register handle_scope_base = x22; | |
| 1702 Register next_address_reg = x19; | |
| 1703 Register limit_reg = x20; | |
| 1704 Register level_reg = w21; | |
| 1705 | |
| 1706 Mov(handle_scope_base, Operand(next_address)); | |
| 1707 Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); | |
| 1708 Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); | |
| 1709 Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
| 1710 Add(level_reg, level_reg, 1); | |
| 1711 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
| 1712 | |
| 1713 if (FLAG_log_timer_events) { | |
| 1714 FrameScope frame(this, StackFrame::MANUAL); | |
| 1715 PushSafepointRegisters(); | |
| 1716 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); | |
| 1717 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); | |
| 1718 PopSafepointRegisters(); | |
| 1719 } | |
| 1720 | |
| 1721   // The native call returns to the DirectCEntry stub, which redirects to the | |
| 1722   // return address pushed on the stack (it could have moved after a GC). | |
| 1723   // The DirectCEntry stub itself is generated early and never moves. | |
| 1724 DirectCEntryStub stub; | |
| 1725 stub.GenerateCall(this, x3); | |
| 1726 | |
| 1727 if (FLAG_log_timer_events) { | |
| 1728 FrameScope frame(this, StackFrame::MANUAL); | |
| 1729 PushSafepointRegisters(); | |
| 1730 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); | |
| 1731 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); | |
| 1732 PopSafepointRegisters(); | |
| 1733 } | |
| 1734 | |
| 1735 Label promote_scheduled_exception; | |
| 1736 Label exception_handled; | |
| 1737 Label delete_allocated_handles; | |
| 1738 Label leave_exit_frame; | |
| 1739 Label return_value_loaded; | |
| 1740 | |
| 1741 // Load value from ReturnValue. | |
| 1742 Ldr(x0, return_value_operand); | |
| 1743 Bind(&return_value_loaded); | |
| 1744 // No more valid handles (the result handle was the last one). Restore | |
| 1745 // previous handle scope. | |
| 1746 Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); | |
| 1747 if (emit_debug_code()) { | |
| 1748 Ldr(w1, MemOperand(handle_scope_base, kLevelOffset)); | |
| 1749 Cmp(w1, level_reg); | |
| 1750 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | |
| 1751 } | |
| 1752 Sub(level_reg, level_reg, 1); | |
| 1753 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
| 1754 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); | |
| 1755 Cmp(limit_reg, x1); | |
| 1756 B(ne, &delete_allocated_handles); | |
| 1757 | |
| 1758 Bind(&leave_exit_frame); | |
| 1759 // Restore callee-saved registers. | |
| 1760 Peek(x19, (spill_offset + 0) * kXRegSizeInBytes); | |
| 1761 Peek(x20, (spill_offset + 1) * kXRegSizeInBytes); | |
| 1762 Peek(x21, (spill_offset + 2) * kXRegSizeInBytes); | |
| 1763 Peek(x22, (spill_offset + 3) * kXRegSizeInBytes); | |
| 1764 | |
| 1765 // Check if the function scheduled an exception. | |
| 1766 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); | |
| 1767 Ldr(x5, MemOperand(x5)); | |
| 1768 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); | |
| 1769 Bind(&exception_handled); | |
| 1770 | |
| 1771 bool restore_context = context_restore_operand != NULL; | |
| 1772 if (restore_context) { | |
| 1773 Ldr(cp, *context_restore_operand); | |
| 1774 } | |
| 1775 | |
| 1776 LeaveExitFrame(false, x1, !restore_context); | |
| 1777 Drop(stack_space); | |
| 1778 Ret(); | |
| 1779 | |
| 1780 Bind(&promote_scheduled_exception); | |
| 1781 { | |
| 1782 FrameScope frame(this, StackFrame::INTERNAL); | |
| 1783 CallExternalReference( | |
| 1784 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0); | |
| 1785 } | |
| 1786 B(&exception_handled); | |
| 1787 | |
| 1788 // HandleScope limit has changed. Delete allocated extensions. | |
| 1789 Bind(&delete_allocated_handles); | |
| 1790 Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); | |
| 1791 // Save the return value in a callee-save register. | |
| 1792 Register saved_result = x19; | |
| 1793 Mov(saved_result, x0); | |
| 1794 Mov(x0, Operand(ExternalReference::isolate_address(isolate()))); | |
| 1795 CallCFunction( | |
| 1796 ExternalReference::delete_handle_scope_extensions(isolate()), 1); | |
| 1797 Mov(x0, saved_result); | |
| 1798 B(&leave_exit_frame); | |
| 1799 } | |
| 1800 | |
| 1801 | |
| 1802 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | |
| 1803 int num_arguments) { | |
| 1804 Mov(x0, num_arguments); | |
| 1805 Mov(x1, Operand(ext)); | |
| 1806 | |
| 1807 CEntryStub stub(1); | |
| 1808 CallStub(&stub); | |
| 1809 } | |
| 1810 | |
| 1811 | |
| 1812 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | |
| 1813 Mov(x1, Operand(builtin)); | |
| 1814 CEntryStub stub(1); | |
| 1815 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); | |
| 1816 } | |
| 1817 | |
| 1818 | |
| 1819 void MacroAssembler::GetBuiltinFunction(Register target, | |
| 1820 Builtins::JavaScript id) { | |
| 1821 // Load the builtins object into target register. | |
| 1822 Ldr(target, GlobalObjectMemOperand()); | |
| 1823 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | |
| 1824 // Load the JavaScript builtin function from the builtins object. | |
| 1825 Ldr(target, FieldMemOperand(target, | |
| 1826 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | |
| 1827 } | |
| 1828 | |
| 1829 | |
| 1830 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | |
| 1831 ASSERT(!target.is(x1)); | |
| 1832 GetBuiltinFunction(x1, id); | |
| 1833 // Load the code entry point from the builtins object. | |
| 1834 Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); | |
| 1835 } | |
| 1836 | |
| 1837 | |
| 1838 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | |
| 1839 InvokeFlag flag, | |
| 1840 const CallWrapper& call_wrapper) { | |
| 1841 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); | |
| 1842 // You can't call a builtin without a valid frame. | |
| 1843 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
| 1844 | |
| 1845 GetBuiltinEntry(x2, id); | |
| 1846 if (flag == CALL_FUNCTION) { | |
| 1847 call_wrapper.BeforeCall(CallSize(x2)); | |
| 1848 Call(x2); | |
| 1849 call_wrapper.AfterCall(); | |
| 1850 } else { | |
| 1851 ASSERT(flag == JUMP_FUNCTION); | |
| 1852 Jump(x2); | |
| 1853 } | |
| 1854 } | |
| 1855 | |
| 1856 | |
| 1857 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | |
| 1858 int num_arguments, | |
| 1859 int result_size) { | |
| 1860 // TODO(1236192): Most runtime routines don't need the number of | |
| 1861 // arguments passed in because it is constant. At some point we | |
| 1862 // should remove this need and make the runtime routine entry code | |
| 1863 // smarter. | |
| 1864 Mov(x0, num_arguments); | |
| 1865 JumpToExternalReference(ext); | |
| 1866 } | |
| 1867 | |
| 1868 | |
| 1869 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | |
| 1870 int num_arguments, | |
| 1871 int result_size) { | |
| 1872 TailCallExternalReference(ExternalReference(fid, isolate()), | |
| 1873 num_arguments, | |
| 1874 result_size); | |
| 1875 } | |
| 1876 | |
| 1877 | |
| 1878 void MacroAssembler::InitializeNewString(Register string, | |
| 1879 Register length, | |
| 1880 Heap::RootListIndex map_index, | |
| 1881 Register scratch1, | |
| 1882 Register scratch2) { | |
| 1883 ASSERT(!AreAliased(string, length, scratch1, scratch2)); | |
| 1884 LoadRoot(scratch2, map_index); | |
| 1885 SmiTag(scratch1, length); | |
| 1886 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | |
| 1887 | |
| 1888 Mov(scratch2, String::kEmptyHashField); | |
| 1889 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | |
| 1890 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); | |
| 1891 } | |
| 1892 | |
| 1893 | |
| 1894 int MacroAssembler::ActivationFrameAlignment() { | |
| 1895 #if V8_HOST_ARCH_A64 | |
| 1896 // Running on the real platform. Use the alignment as mandated by the local | |
| 1897 // environment. | |
| 1898 // Note: This will break if we ever start generating snapshots on one ARM | |
| 1899 // platform for another ARM platform with a different alignment. | |
| 1900 return OS::ActivationFrameAlignment(); | |
| 1901 #else // V8_HOST_ARCH_A64 | |
| 1902 // If we are using the simulator then we should always align to the expected | |
| 1903 // alignment. As the simulator is used to generate snapshots we do not know | |
| 1904 // if the target platform will need alignment, so this is controlled from a | |
| 1905 // flag. | |
| 1906 return FLAG_sim_stack_alignment; | |
| 1907 #endif // V8_HOST_ARCH_A64 | |
| 1908 } | |
| 1909 | |
| 1910 | |
| 1911 void MacroAssembler::CallCFunction(ExternalReference function, | |
| 1912 int num_of_reg_args) { | |
| 1913 CallCFunction(function, num_of_reg_args, 0); | |
| 1914 } | |
| 1915 | |
| 1916 | |
| 1917 void MacroAssembler::CallCFunction(ExternalReference function, | |
| 1918 int num_of_reg_args, | |
| 1919 int num_of_double_args) { | |
| 1920 Mov(Tmp0(), Operand(function)); | |
| 1921 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args); | |
| 1922 } | |
| 1923 | |
| 1924 | |
| 1925 void MacroAssembler::CallCFunction(Register function, | |
| 1926 int num_of_reg_args, | |
| 1927 int num_of_double_args) { | |
| 1928 ASSERT(has_frame()); | |
| 1929 // We can pass 8 integer arguments in registers. If we need to pass more than | |
| 1930 // that, we'll need to implement support for passing them on the stack. | |
| 1931 ASSERT(num_of_reg_args <= 8); | |
| 1932 | |
| 1933 // If we're passing doubles, we're limited to the following prototypes | |
| 1934 // (defined by ExternalReference::Type): | |
| 1935 // BUILTIN_COMPARE_CALL: int f(double, double) | |
| 1936 // BUILTIN_FP_FP_CALL: double f(double, double) | |
| 1937 // BUILTIN_FP_CALL: double f(double) | |
| 1938 // BUILTIN_FP_INT_CALL: double f(double, int) | |
| 1939 if (num_of_double_args > 0) { | |
| 1940 ASSERT(num_of_reg_args <= 1); | |
| 1941 ASSERT((num_of_double_args + num_of_reg_args) <= 2); | |
| 1942 } | |
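|        // Note: under the AAPCS64 parameter-passing rules, these prototypes take | |
|        // their double arguments in d0/d1 and any integer argument in w0, so no | |
|        // arguments need to be passed on the stack. | |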
| 1943 | |
| 1944 | |
| 1945 // If the stack pointer is not csp, we need to derive an aligned csp from the | |
| 1946 // current stack pointer. | |
| 1947 const Register old_stack_pointer = StackPointer(); | |
| 1948 if (!csp.Is(old_stack_pointer)) { | |
| 1949 AssertStackConsistency(); | |
| 1950 | |
| 1951 int sp_alignment = ActivationFrameAlignment(); | |
| 1952 // The ABI mandates at least 16-byte alignment. | |
| 1953 ASSERT(sp_alignment >= 16); | |
| 1954 ASSERT(IsPowerOf2(sp_alignment)); | |
| 1955 | |
| 1956 // The current stack pointer is a callee saved register, and is preserved | |
| 1957 // across the call. | |
| 1958 ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); | |
| 1959 | |
| 1960 // Align and synchronize the system stack pointer with jssp. | |
| 1961 Bic(csp, old_stack_pointer, sp_alignment - 1); | |
| 1962 SetStackPointer(csp); | |
| 1963 } | |
| 1964 | |
| 1965 // Call directly. The function called cannot cause a GC, or allow preemption, | |
| 1966 // so the return address in the link register stays correct. | |
| 1967 Call(function); | |
| 1968 | |
| 1969 if (!csp.Is(old_stack_pointer)) { | |
| 1970 if (emit_debug_code()) { | |
| 1971 // Because the stack pointer must be aligned on a 16-byte boundary, the | |
| 1972 // aligned csp can be up to 12 bytes below the jssp. This is the case | |
| 1973 // where we only pushed one W register on top of an aligned jssp. | |
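|        // For example, if jssp was 16-byte aligned and a single W register was | |
|        // then pushed, jssp is now (aligned - 4), Bic rounded csp down to | |
|        // (aligned - 16), leaving csp - jssp == -12. | |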
| 1974 Register temp = Tmp1(); | |
| 1975 ASSERT(ActivationFrameAlignment() == 16); | |
| 1976 Sub(temp, csp, old_stack_pointer); | |
| 1977 // We want temp <= 0 && temp >= -12. | |
| 1978 Cmp(temp, 0); | |
| 1979 Ccmp(temp, -12, NFlag, le); | |
| 1980 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); | |
| 1981 } | |
| 1982 SetStackPointer(old_stack_pointer); | |
| 1983 } | |
| 1984 } | |
| 1985 | |
| 1986 | |
| 1987 void MacroAssembler::Jump(Register target) { | |
| 1988 Br(target); | |
| 1989 } | |
| 1990 | |
| 1991 | |
| 1992 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { | |
| 1993 Mov(Tmp0(), Operand(target, rmode)); | |
| 1994 Br(Tmp0()); | |
| 1995 } | |
| 1996 | |
| 1997 | |
| 1998 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { | |
| 1999 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | |
| 2000 Jump(reinterpret_cast<intptr_t>(target), rmode); | |
| 2001 } | |
| 2002 | |
| 2003 | |
| 2004 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { | |
| 2005 ASSERT(RelocInfo::IsCodeTarget(rmode)); | |
| 2006 AllowDeferredHandleDereference embedding_raw_address; | |
| 2007 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); | |
| 2008 } | |
| 2009 | |
| 2010 | |
| 2011 void MacroAssembler::Call(Register target) { | |
| 2012 BlockConstPoolScope scope(this); | |
| 2013 #ifdef DEBUG | |
| 2014 Label start_call; | |
| 2015 Bind(&start_call); | |
| 2016 #endif | |
| 2017 | |
| 2018 Blr(target); | |
| 2019 | |
| 2020 #ifdef DEBUG | |
| 2021 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | |
| 2022 #endif | |
| 2023 } | |
| 2024 | |
| 2025 | |
| 2026 void MacroAssembler::Call(Label* target) { | |
| 2027 BlockConstPoolScope scope(this); | |
| 2028 #ifdef DEBUG | |
| 2029 Label start_call; | |
| 2030 Bind(&start_call); | |
| 2031 #endif | |
| 2032 | |
| 2033 Bl(target); | |
| 2034 | |
| 2035 #ifdef DEBUG | |
| 2036 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | |
| 2037 #endif | |
| 2038 } | |
| 2039 | |
| 2040 | |
| 2041 // MacroAssembler::CallSize is sensitive to changes in this function, as it | |
| 2042 // needs to know how many instructions are used to branch to the target. | |
| 2043 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { | |
| 2044 BlockConstPoolScope scope(this); | |
| 2045 #ifdef DEBUG | |
| 2046 Label start_call; | |
| 2047 Bind(&start_call); | |
| 2048 #endif | |
| 2049 // Statement positions are expected to be recorded when the target | |
| 2050 // address is loaded. | |
| 2051 positions_recorder()->WriteRecordedPositions(); | |
| 2052 | |
| 2053 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
| 2054 ASSERT(rmode != RelocInfo::NONE32); | |
| 2055 | |
| 2056 if (rmode == RelocInfo::NONE64) { | |
| 2057 uint64_t imm = reinterpret_cast<uint64_t>(target); | |
| 2058 movz(Tmp0(), (imm >> 0) & 0xffff, 0); | |
| 2059 movk(Tmp0(), (imm >> 16) & 0xffff, 16); | |
| 2060 movk(Tmp0(), (imm >> 32) & 0xffff, 32); | |
| 2061 movk(Tmp0(), (imm >> 48) & 0xffff, 48); | |
| 2062 } else { | |
| 2063 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode)); | |
| 2064 } | |
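|        // The NONE64 path above emits a fixed four-instruction movz/movk sequence, | |
|        // so the overall call length stays constant and matches what | |
|        // CallSize(Address, rmode) reports for this mode. | |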
| 2065 Blr(Tmp0()); | |
| 2066 #ifdef DEBUG | |
| 2067 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); | |
| 2068 #endif | |
| 2069 } | |
| 2070 | |
| 2071 | |
| 2072 void MacroAssembler::Call(Handle<Code> code, | |
| 2073 RelocInfo::Mode rmode, | |
| 2074 TypeFeedbackId ast_id) { | |
| 2075 #ifdef DEBUG | |
| 2076 Label start_call; | |
| 2077 Bind(&start_call); | |
| 2078 #endif | |
| 2079 | |
| 2080 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) { | |
| 2081 SetRecordedAstId(ast_id); | |
| 2082 rmode = RelocInfo::CODE_TARGET_WITH_ID; | |
| 2083 } | |
| 2084 | |
| 2085 AllowDeferredHandleDereference embedding_raw_address; | |
| 2086 Call(reinterpret_cast<Address>(code.location()), rmode); | |
| 2087 | |
| 2088 #ifdef DEBUG | |
| 2089 // Check the size of the code generated. | |
| 2090 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id)); | |
| 2091 #endif | |
| 2092 } | |
| 2093 | |
| 2094 | |
| 2095 int MacroAssembler::CallSize(Register target) { | |
| 2096 USE(target); | |
| 2097 return kInstructionSize; | |
| 2098 } | |
| 2099 | |
| 2100 | |
| 2101 int MacroAssembler::CallSize(Label* target) { | |
| 2102 USE(target); | |
| 2103 return kInstructionSize; | |
| 2104 } | |
| 2105 | |
| 2106 | |
| 2107 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { | |
| 2108 USE(target); | |
| 2109 | |
| 2110 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
| 2111 ASSERT(rmode != RelocInfo::NONE32); | |
| 2112 | |
| 2113 if (rmode == RelocInfo::NONE64) { | |
| 2114 return kCallSizeWithoutRelocation; | |
| 2115 } else { | |
| 2116 return kCallSizeWithRelocation; | |
| 2117 } | |
| 2118 } | |
| 2119 | |
| 2120 | |
| 2121 int MacroAssembler::CallSize(Handle<Code> code, | |
| 2122 RelocInfo::Mode rmode, | |
| 2123 TypeFeedbackId ast_id) { | |
| 2124 USE(code); | |
| 2125 USE(ast_id); | |
| 2126 | |
| 2127 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
| 2128 ASSERT(rmode != RelocInfo::NONE32); | |
| 2129 | |
| 2130 if (rmode == RelocInfo::NONE64) { | |
| 2131 return kCallSizeWithoutRelocation; | |
| 2132 } else { | |
| 2133 return kCallSizeWithRelocation; | |
| 2134 } | |
| 2135 } | |
| 2136 | |
| 2137 | |
| 2141 void MacroAssembler::JumpForHeapNumber(Register object, | |
| 2142 Register heap_number_map, | |
| 2143 Label* on_heap_number, | |
| 2144 Label* on_not_heap_number) { | |
| 2145 ASSERT(on_heap_number || on_not_heap_number); | |
| 2146 // Tmp0() is used as a scratch register. | |
| 2147 ASSERT(!AreAliased(Tmp0(), heap_number_map)); | |
| 2148 AssertNotSmi(object); | |
| 2149 | |
| 2150 // Load the HeapNumber map if it is not passed. | |
| 2151 if (heap_number_map.Is(NoReg)) { | |
| 2152 heap_number_map = Tmp1(); | |
| 2153 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 2154 } else { | |
| 2155 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map. | |
| 2156 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 2157 } | |
| 2158 | |
| 2159 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 2160 Cmp(Tmp0(), heap_number_map); | |
| 2161 | |
| 2162 if (on_heap_number) { | |
| 2163 B(eq, on_heap_number); | |
| 2164 } | |
| 2165 if (on_not_heap_number) { | |
| 2166 B(ne, on_not_heap_number); | |
| 2167 } | |
| 2168 } | |
| 2169 | |
| 2170 | |
| 2171 void MacroAssembler::JumpIfHeapNumber(Register object, | |
| 2172 Label* on_heap_number, | |
| 2173 Register heap_number_map) { | |
| 2174 JumpForHeapNumber(object, | |
| 2175 heap_number_map, | |
| 2176 on_heap_number, | |
| 2177 NULL); | |
| 2178 } | |
| 2179 | |
| 2180 | |
| 2181 void MacroAssembler::JumpIfNotHeapNumber(Register object, | |
| 2182 Label* on_not_heap_number, | |
| 2183 Register heap_number_map) { | |
| 2184 JumpForHeapNumber(object, | |
| 2185 heap_number_map, | |
| 2186 NULL, | |
| 2187 on_not_heap_number); | |
| 2188 } | |
| 2189 | |
| 2190 | |
| 2191 void MacroAssembler::LookupNumberStringCache(Register object, | |
| 2192 Register result, | |
| 2193 Register scratch1, | |
| 2194 Register scratch2, | |
| 2195 Register scratch3, | |
| 2196 Label* not_found) { | |
| 2197 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3)); | |
| 2198 | |
| 2199 // Use of registers. Register result is used as a temporary. | |
| 2200 Register number_string_cache = result; | |
| 2201 Register mask = scratch3; | |
| 2202 | |
| 2203 // Load the number string cache. | |
| 2204 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | |
| 2205 | |
| 2206 // Make the hash mask from the length of the number string cache. It | |
| 2207 // contains two elements (number and string) for each cache entry. | |
| 2208 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache, | |
| 2209 FixedArray::kLengthOffset)); | |
| 2210 Asr(mask, mask, 1); // Divide length by two. | |
| 2211 Sub(mask, mask, 1); // Make mask. | |
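|        // For example, a cache with 64 (number, string) entry pairs has a | |
|        // FixedArray length of 128, giving mask == 63. | |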
| 2212 | |
| 2213 // Calculate the entry in the number string cache. The hash value in the | |
| 2214 // number string cache for smis is just the smi value, and the hash for | |
| 2215 // doubles is the xor of the upper and lower words. See | |
| 2216 // Heap::GetNumberStringCache. | |
| 2217 Label is_smi; | |
| 2218 Label load_result_from_cache; | |
| 2219 | |
| 2220 JumpIfSmi(object, &is_smi); | |
| 2221 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, | |
| 2222 DONT_DO_SMI_CHECK); | |
| 2223 | |
| 2224 STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2)); | |
| 2225 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); | |
| 2226 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); | |
| 2227 Eor(scratch1, scratch1, scratch2); | |
| 2228 And(scratch1, scratch1, mask); | |
| 2229 | |
| 2230 // Calculate address of entry in string cache: each entry consists of two | |
| 2231 // pointer sized fields. | |
| 2232 Add(scratch1, number_string_cache, | |
| 2233 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
| 2234 | |
| 2235 Register probe = mask; | |
| 2236 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | |
| 2237 JumpIfSmi(probe, not_found); | |
| 2238 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 2239 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); | |
| 2240 Fcmp(d0, d1); | |
| 2241 B(ne, not_found); | |
| 2242 B(&load_result_from_cache); | |
| 2243 | |
| 2244 Bind(&is_smi); | |
| 2245 Register scratch = scratch1; | |
| 2246 And(scratch, mask, Operand::UntagSmi(object)); | |
| 2247 // Calculate address of entry in string cache: each entry consists | |
| 2248 // of two pointer sized fields. | |
| 2249 Add(scratch, number_string_cache, | |
| 2250 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
| 2251 | |
| 2252 // Check if the entry is the smi we are looking for. | |
| 2253 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | |
| 2254 Cmp(object, probe); | |
| 2255 B(ne, not_found); | |
| 2256 | |
| 2257 // Get the result from the cache. | |
| 2258 Bind(&load_result_from_cache); | |
| 2259 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | |
| 2260 IncrementCounter(isolate()->counters()->number_to_string_native(), 1, | |
| 2261 scratch1, scratch2); | |
| 2262 } | |
| 2263 | |
| 2264 | |
| 2265 void MacroAssembler::TryConvertDoubleToInt(Register as_int, | |
| 2266 FPRegister value, | |
| 2267 FPRegister scratch_d, | |
| 2268 Label* on_successful_conversion, | |
| 2269 Label* on_failed_conversion) { | |
| 2270 // Convert to an int and back again, then compare with the original value. | |
| 2271 Fcvtzs(as_int, value); | |
| 2272 Scvtf(scratch_d, as_int); | |
| 2273 Fcmp(value, scratch_d); | |
| 2274 | |
| 2275 if (on_successful_conversion) { | |
| 2276 B(on_successful_conversion, eq); | |
| 2277 } | |
| 2278 if (on_failed_conversion) { | |
| 2279 B(on_failed_conversion, ne); | |
| 2280 } | |
| 2281 } | |
| 2282 | |
| 2283 | |
| 2284 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, | |
| 2285 Label* on_negative_zero) { | |
| 2286   // The bit pattern of -0.0 is INT64_MIN (only the sign bit set), so | |
| 2287   // subtracting 1 (via Cmp) causes a signed overflow and sets the V flag. | |
| 2288 Fmov(Tmp0(), input); | |
| 2289 Cmp(Tmp0(), 1); | |
| 2290 B(vs, on_negative_zero); | |
| 2291 } | |
| 2292 | |
| 2293 | |
| 2294 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { | |
| 2295 // Clamp the value to [0..255]. | |
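|        // For example: 300 clamps to 255 (300 > (300 & 0xff) == 44), -5 clamps | |
|        // to 0 (-5 < (-5 & 0xff) == 251), and 100 is left unchanged. | |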
| 2296 Cmp(input.W(), Operand(input.W(), UXTB)); | |
| 2297 // If input < input & 0xff, it must be < 0, so saturate to 0. | |
| 2298 Csel(output.W(), wzr, input.W(), lt); | |
| 2299 // Create a constant 0xff. | |
| 2300 Mov(WTmp0(), 255); | |
| 2301 // If input > input & 0xff, it must be > 255, so saturate to 255. | |
| 2302 Csel(output.W(), WTmp0(), output.W(), gt); | |
| 2303 } | |
| 2304 | |
| 2305 | |
| 2306 void MacroAssembler::ClampInt32ToUint8(Register in_out) { | |
| 2307 ClampInt32ToUint8(in_out, in_out); | |
| 2308 } | |
| 2309 | |
| 2310 | |
| 2311 void MacroAssembler::ClampDoubleToUint8(Register output, | |
| 2312 DoubleRegister input, | |
| 2313 DoubleRegister dbl_scratch) { | |
| 2314 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types: | |
| 2315 // - Inputs lower than 0 (including -infinity) produce 0. | |
| 2316 // - Inputs higher than 255 (including +infinity) produce 255. | |
| 2317 // Also, it seems that PIXEL types use round-to-nearest rather than | |
| 2318 // round-towards-zero. | |
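|        // For example (given Fcvtnu's round-to-nearest behaviour): 300.0 clamps | |
|        // to 255, -3.2 converts to 0, and 1.6 converts to 2. | |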
| 2319 | |
| 2320 // Squash +infinity before the conversion, since Fcvtnu will normally | |
| 2321 // convert it to 0. | |
| 2322 Fmov(dbl_scratch, 255); | |
| 2323 Fmin(dbl_scratch, dbl_scratch, input); | |
| 2324 | |
| 2325 // Convert double to unsigned integer. Values less than zero become zero. | |
| 2326 // Values greater than 255 have already been clamped to 255. | |
| 2327 Fcvtnu(output, dbl_scratch); | |
| 2328 } | |
| 2329 | |
| 2330 | |
| 2331 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, | |
| 2332 Register src, | |
| 2333 unsigned count, | |
| 2334 Register scratch1, | |
| 2335 Register scratch2, | |
| 2336 Register scratch3) { | |
| 2337 // Untag src and dst into scratch registers. | |
| 2338 // Copy src->dst in a tight loop. | |
| 2339 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1())); | |
| 2340 ASSERT(count >= 2); | |
| 2341 | |
| 2342 const Register& remaining = scratch3; | |
| 2343 Mov(remaining, count / 2); | |
| 2344 | |
| 2345 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2346 InstructionAccurateScope scope(this); | |
| 2347 | |
| 2348 const Register& dst_untagged = scratch1; | |
| 2349 const Register& src_untagged = scratch2; | |
| 2350 sub(dst_untagged, dst, kHeapObjectTag); | |
| 2351 sub(src_untagged, src, kHeapObjectTag); | |
| 2352 | |
| 2353 // Copy fields in pairs. | |
| 2354 Label loop; | |
| 2355 bind(&loop); | |
| 2356 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | |
| 2357 PostIndex)); | |
| 2358 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | |
| 2359 PostIndex)); | |
| 2360 sub(remaining, remaining, 1); | |
| 2361 cbnz(remaining, &loop); | |
| 2362 | |
| 2363 // Handle the leftovers. | |
| 2364 if (count & 1) { | |
| 2365 ldr(Tmp0(), MemOperand(src_untagged)); | |
| 2366 str(Tmp0(), MemOperand(dst_untagged)); | |
| 2367 } | |
| 2368 } | |
| 2369 | |
| 2370 | |
| 2371 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | |
| 2372 Register src, | |
| 2373 unsigned count, | |
| 2374 Register scratch1, | |
| 2375 Register scratch2) { | |
| 2376 // Untag src and dst into scratch registers. | |
| 2377 // Copy src->dst in an unrolled loop. | |
| 2378 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1())); | |
| 2379 | |
| 2380 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2381 InstructionAccurateScope scope(this); | |
| 2382 | |
| 2383 const Register& dst_untagged = scratch1; | |
| 2384 const Register& src_untagged = scratch2; | |
| 2385 sub(dst_untagged, dst, kHeapObjectTag); | |
| 2386 sub(src_untagged, src, kHeapObjectTag); | |
| 2387 | |
| 2388 // Copy fields in pairs. | |
| 2389 for (unsigned i = 0; i < count / 2; i++) { | |
| 2390 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | |
| 2391 PostIndex)); | |
| 2392 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | |
| 2393 PostIndex)); | |
| 2394 } | |
| 2395 | |
| 2396 // Handle the leftovers. | |
| 2397 if (count & 1) { | |
| 2398 ldr(Tmp0(), MemOperand(src_untagged)); | |
| 2399 str(Tmp0(), MemOperand(dst_untagged)); | |
| 2400 } | |
| 2401 } | |
| 2402 | |
| 2403 | |
| 2404 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | |
| 2405 Register src, | |
| 2406 unsigned count, | |
| 2407 Register scratch1) { | |
| 2408 // Untag src and dst into scratch registers. | |
| 2409 // Copy src->dst in an unrolled loop. | |
| 2410 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1())); | |
| 2411 | |
| 2412 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2413 InstructionAccurateScope scope(this); | |
| 2414 | |
| 2415 const Register& dst_untagged = scratch1; | |
| 2416 const Register& src_untagged = Tmp1(); | |
| 2417 sub(dst_untagged, dst, kHeapObjectTag); | |
| 2418 sub(src_untagged, src, kHeapObjectTag); | |
| 2419 | |
| 2420 // Copy fields one by one. | |
| 2421 for (unsigned i = 0; i < count; i++) { | |
| 2422 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); | |
| 2423 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); | |
| 2424 } | |
| 2425 } | |
| 2426 | |
| 2427 | |
| 2428 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | |
| 2429 unsigned count) { | |
| 2430 // One of two methods is used: | |
| 2431 // | |
| 2432 // For high 'count' values where many scratch registers are available: | |
| 2433 // Untag src and dst into scratch registers. | |
| 2434 // Copy src->dst in a tight loop. | |
| 2435 // | |
| 2436 // For low 'count' values or where few scratch registers are available: | |
| 2437 // Untag src and dst into scratch registers. | |
| 2438 // Copy src->dst in an unrolled loop. | |
| 2439 // | |
| 2440 // In both cases, fields are copied in pairs if possible, and left-overs are | |
| 2441 // handled separately. | |
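|        // For example, with three scratch registers available, count == 10 uses | |
|        // the loop helper (five ldp/stp iterations), while count == 3 with two | |
|        // scratch registers uses the unrolled-pairs helper (one ldp/stp plus one | |
|        // ldr/str for the leftover field). | |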
| 2442 ASSERT(!temps.IncludesAliasOf(dst)); | |
| 2443 ASSERT(!temps.IncludesAliasOf(src)); | |
| 2444 ASSERT(!temps.IncludesAliasOf(Tmp0())); | |
| 2445 ASSERT(!temps.IncludesAliasOf(Tmp1())); | |
| 2446 ASSERT(!temps.IncludesAliasOf(xzr)); | |
| 2447 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1())); | |
| 2448 | |
| 2449 if (emit_debug_code()) { | |
| 2450 Cmp(dst, src); | |
| 2451 Check(ne, kTheSourceAndDestinationAreTheSame); | |
| 2452 } | |
| 2453 | |
| 2454 // The value of 'count' at which a loop will be generated (if there are | |
| 2455 // enough scratch registers). | |
| 2456 static const unsigned kLoopThreshold = 8; | |
| 2457 | |
| 2458 ASSERT(!temps.IsEmpty()); | |
| 2459 Register scratch1 = Register(temps.PopLowestIndex()); | |
| 2460 Register scratch2 = Register(temps.PopLowestIndex()); | |
| 2461 Register scratch3 = Register(temps.PopLowestIndex()); | |
| 2462 | |
| 2463 if (scratch3.IsValid() && (count >= kLoopThreshold)) { | |
| 2464 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3); | |
| 2465 } else if (scratch2.IsValid()) { | |
| 2466 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2); | |
| 2467 } else if (scratch1.IsValid()) { | |
| 2468 CopyFieldsUnrolledHelper(dst, src, count, scratch1); | |
| 2469 } else { | |
| 2470 UNREACHABLE(); | |
| 2471 } | |
| 2472 } | |
| 2473 | |
| 2474 | |
| 2475 void MacroAssembler::CopyBytes(Register dst, | |
| 2476 Register src, | |
| 2477 Register length, | |
| 2478 Register scratch, | |
| 2479 CopyHint hint) { | |
| 2480 ASSERT(!AreAliased(src, dst, length, scratch)); | |
| 2481 | |
| 2482 // TODO(all): Implement a faster copy function, and use hint to determine | |
| 2483 // which algorithm to use for copies. | |
| 2484 if (emit_debug_code()) { | |
| 2485 // Check copy length. | |
| 2486 Cmp(length, 0); | |
| 2487 Assert(ge, kUnexpectedNegativeValue); | |
| 2488 | |
| 2489 // Check src and dst buffers don't overlap. | |
| 2490 Add(scratch, src, length); // Calculate end of src buffer. | |
| 2491 Cmp(scratch, dst); | |
| 2492 Add(scratch, dst, length); // Calculate end of dst buffer. | |
| 2493 Ccmp(scratch, src, ZFlag, gt); | |
| 2494 Assert(le, kCopyBuffersOverlap); | |
| 2495 } | |
| 2496 | |
| 2497 Label loop, done; | |
| 2498 Cbz(length, &done); | |
| 2499 | |
| 2500 Bind(&loop); | |
| 2501 Sub(length, length, 1); | |
| 2502 Ldrb(scratch, MemOperand(src, 1, PostIndex)); | |
| 2503 Strb(scratch, MemOperand(dst, 1, PostIndex)); | |
| 2504 Cbnz(length, &loop); | |
| 2505 Bind(&done); | |
| 2506 } | |
| 2507 | |
| 2508 | |
| 2509 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | |
| 2510 Register end_offset, | |
| 2511 Register filler) { | |
| 2512 Label loop, entry; | |
| 2513 B(&entry); | |
| 2514 Bind(&loop); | |
| 2515 // TODO(all): consider using stp here. | |
| 2516 Str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); | |
| 2517 Bind(&entry); | |
| 2518 Cmp(start_offset, end_offset); | |
| 2519 B(lt, &loop); | |
| 2520 } | |
| 2521 | |
| 2522 | |
| 2523 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( | |
| 2524 Register first, | |
| 2525 Register second, | |
| 2526 Register scratch1, | |
| 2527 Register scratch2, | |
| 2528 Label* failure, | |
| 2529 SmiCheckType smi_check) { | |
| 2530 | |
| 2531 if (smi_check == DO_SMI_CHECK) { | |
| 2532 JumpIfEitherSmi(first, second, failure); | |
| 2533 } else if (emit_debug_code()) { | |
| 2534 ASSERT(smi_check == DONT_DO_SMI_CHECK); | |
| 2535 Label not_smi; | |
| 2536 JumpIfEitherSmi(first, second, NULL, ¬_smi); | |
| 2537 | |
| 2538 // At least one input is a smi, but the flags indicated a smi check wasn't | |
| 2539 // needed. | |
| 2540 Abort(kUnexpectedSmi); | |
| 2541 | |
| 2542 Bind(¬_smi); | |
| 2543 } | |
| 2544 | |
| 2545 // Test that both first and second are sequential ASCII strings. | |
| 2546 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | |
| 2547 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | |
| 2548 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | |
| 2549 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | |
| 2550 | |
| 2551 JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1, | |
| 2552 scratch2, | |
| 2553 scratch1, | |
| 2554 scratch2, | |
| 2555 failure); | |
| 2556 } | |
| 2557 | |
| 2558 | |
| 2559 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii( | |
| 2560 Register first, | |
| 2561 Register second, | |
| 2562 Register scratch1, | |
| 2563 Register scratch2, | |
| 2564 Label* failure) { | |
| 2565 ASSERT(!AreAliased(scratch1, second)); | |
| 2566 ASSERT(!AreAliased(scratch1, scratch2)); | |
| 2567 static const int kFlatAsciiStringMask = | |
| 2568 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
| 2569 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; | |
| 2570 And(scratch1, first, kFlatAsciiStringMask); | |
| 2571 And(scratch2, second, kFlatAsciiStringMask); | |
| 2572 Cmp(scratch1, kFlatAsciiStringTag); | |
| 2573 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); | |
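|        // Ccmp only performs the second comparison if the first one set eq; | |
|        // otherwise it forces the flags to NoFlag (ne), so the single ne branch | |
|        // below catches a mismatch in either register. | |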
| 2574 B(ne, failure); | |
| 2575 } | |
| 2576 | |
| 2577 | |
| 2578 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, | |
| 2579 Register scratch, | |
| 2580 Label* failure) { | |
| 2581 const int kFlatAsciiStringMask = | |
| 2582 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
| 2583 const int kFlatAsciiStringTag = | |
| 2584 kStringTag | kOneByteStringTag | kSeqStringTag; | |
| 2585 And(scratch, type, kFlatAsciiStringMask); | |
| 2586 Cmp(scratch, kFlatAsciiStringTag); | |
| 2587 B(ne, failure); | |
| 2588 } | |
| 2589 | |
| 2590 | |
| 2591 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | |
| 2592 Register first, | |
| 2593 Register second, | |
| 2594 Register scratch1, | |
| 2595 Register scratch2, | |
| 2596 Label* failure) { | |
| 2597 ASSERT(!AreAliased(first, second, scratch1, scratch2)); | |
| 2598 const int kFlatAsciiStringMask = | |
| 2599 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
| 2600 const int kFlatAsciiStringTag = | |
| 2601 kStringTag | kOneByteStringTag | kSeqStringTag; | |
| 2602 And(scratch1, first, kFlatAsciiStringMask); | |
| 2603 And(scratch2, second, kFlatAsciiStringMask); | |
| 2604 Cmp(scratch1, kFlatAsciiStringTag); | |
| 2605 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); | |
| 2606 B(ne, failure); | |
| 2607 } | |
| 2608 | |
| 2609 | |
| 2610 void MacroAssembler::JumpIfNotUniqueName(Register type, | |
| 2611 Label* not_unique_name) { | |
| 2612 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
| 2613 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) { | |
| 2614 // continue | |
| 2615 // } else { | |
| 2616 // goto not_unique_name | |
| 2617 // } | |
| 2618 Tst(type, kIsNotStringMask | kIsNotInternalizedMask); | |
| 2619 Ccmp(type, SYMBOL_TYPE, ZFlag, ne); | |
| 2620 B(ne, not_unique_name); | |
| 2621 } | |
| 2622 | |
| 2623 | |
| 2624 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | |
| 2625 const ParameterCount& actual, | |
| 2626 Handle<Code> code_constant, | |
| 2627 Register code_reg, | |
| 2628 Label* done, | |
| 2629 InvokeFlag flag, | |
| 2630 bool* definitely_mismatches, | |
| 2631 const CallWrapper& call_wrapper) { | |
| 2632 bool definitely_matches = false; | |
| 2633 *definitely_mismatches = false; | |
| 2634 Label regular_invoke; | |
| 2635 | |
| 2636 // Check whether the expected and actual arguments count match. If not, | |
| 2637 // setup registers according to contract with ArgumentsAdaptorTrampoline: | |
| 2638 // x0: actual arguments count. | |
| 2639 // x1: function (passed through to callee). | |
| 2640 // x2: expected arguments count. | |
| 2641 | |
| 2642 // The code below is made a lot easier because the calling code already sets | |
| 2643 // up actual and expected registers according to the contract if values are | |
| 2644 // passed in registers. | |
| 2645 ASSERT(actual.is_immediate() || actual.reg().is(x0)); | |
| 2646 ASSERT(expected.is_immediate() || expected.reg().is(x2)); | |
| 2647 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); | |
| 2648 | |
| 2649 if (expected.is_immediate()) { | |
| 2650 ASSERT(actual.is_immediate()); | |
| 2651 if (expected.immediate() == actual.immediate()) { | |
| 2652 definitely_matches = true; | |
| 2653 | |
| 2654 } else { | |
| 2655 Mov(x0, actual.immediate()); | |
| 2656 if (expected.immediate() == | |
| 2657 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { | |
| 2658 // Don't worry about adapting arguments for builtins that | |
| 2659 // don't want that done. Skip adaption code by making it look | |
| 2660 // like we have a match between expected and actual number of | |
| 2661 // arguments. | |
| 2662 definitely_matches = true; | |
| 2663 } else { | |
| 2664 *definitely_mismatches = true; | |
| 2665 // Set up x2 for the argument adaptor. | |
| 2666 Mov(x2, expected.immediate()); | |
| 2667 } | |
| 2668 } | |
| 2669 | |
| 2670 } else { // expected is a register. | |
| 2671 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) | |
| 2672 : Operand(actual.reg()); | |
| 2673 // If actual == expected perform a regular invocation. | |
| 2674 Cmp(expected.reg(), actual_op); | |
| 2675 B(eq, ®ular_invoke); | |
| 2676 // Otherwise set up x0 for the argument adaptor. | |
| 2677 Mov(x0, actual_op); | |
| 2678 } | |
| 2679 | |
| 2680 // If the argument counts may mismatch, generate a call to the argument | |
| 2681 // adaptor. | |
| 2682 if (!definitely_matches) { | |
| 2683 if (!code_constant.is_null()) { | |
| 2684 Mov(x3, Operand(code_constant)); | |
| 2685 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag); | |
| 2686 } | |
| 2687 | |
| 2688 Handle<Code> adaptor = | |
| 2689 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | |
| 2690 if (flag == CALL_FUNCTION) { | |
| 2691 call_wrapper.BeforeCall(CallSize(adaptor)); | |
| 2692 Call(adaptor); | |
| 2693 call_wrapper.AfterCall(); | |
| 2694 if (!*definitely_mismatches) { | |
| 2695 // If the arg counts don't match, no extra code is emitted by | |
| 2696 // MAsm::InvokeCode and we can just fall through. | |
| 2697 B(done); | |
| 2698 } | |
| 2699 } else { | |
| 2700 Jump(adaptor, RelocInfo::CODE_TARGET); | |
| 2701 } | |
| 2702 } | |
| 2703 Bind(®ular_invoke); | |
| 2704 } | |
| 2705 | |
| 2706 | |
| 2707 void MacroAssembler::InvokeCode(Register code, | |
| 2708 const ParameterCount& expected, | |
| 2709 const ParameterCount& actual, | |
| 2710 InvokeFlag flag, | |
| 2711 const CallWrapper& call_wrapper) { | |
| 2712 // You can't call a function without a valid frame. | |
| 2713 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
| 2714 | |
| 2715 Label done; | |
| 2716 | |
| 2717 bool definitely_mismatches = false; | |
| 2718 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, | |
| 2719 &definitely_mismatches, call_wrapper); | |
| 2720 | |
| 2721 // If we are certain that actual != expected, then we know InvokePrologue will | |
| 2722 // have handled the call through the argument adaptor mechanism. | |
| 2723 // The called function expects the call kind in x5. | |
| 2724 if (!definitely_mismatches) { | |
| 2725 if (flag == CALL_FUNCTION) { | |
| 2726 call_wrapper.BeforeCall(CallSize(code)); | |
| 2727 Call(code); | |
| 2728 call_wrapper.AfterCall(); | |
| 2729 } else { | |
| 2730 ASSERT(flag == JUMP_FUNCTION); | |
| 2731 Jump(code); | |
| 2732 } | |
| 2733 } | |
| 2734 | |
| 2735   // Continue here if InvokePrologue handled the invocation (through the | |
| 2736   // argument adaptor) because of mismatched parameter counts. | |
| 2737 Bind(&done); | |
| 2738 } | |
| 2739 | |
| 2740 | |
| 2741 void MacroAssembler::InvokeFunction(Register function, | |
| 2742 const ParameterCount& actual, | |
| 2743 InvokeFlag flag, | |
| 2744 const CallWrapper& call_wrapper) { | |
| 2745 // You can't call a function without a valid frame. | |
| 2746 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
| 2747 | |
| 2748 // Contract with called JS functions requires that function is passed in x1. | |
| 2749 // (See FullCodeGenerator::Generate().) | |
| 2750 ASSERT(function.is(x1)); | |
| 2751 | |
| 2752 Register expected_reg = x2; | |
| 2753 Register code_reg = x3; | |
| 2754 | |
| 2755 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); | |
| 2756 // The number of arguments is stored as an int32_t, and -1 is a marker | |
| 2757 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign | |
| 2758 // extension to correctly handle it. | |
| 2759 Ldr(expected_reg, FieldMemOperand(function, | |
| 2760 JSFunction::kSharedFunctionInfoOffset)); | |
| 2761 Ldrsw(expected_reg, | |
| 2762 FieldMemOperand(expected_reg, | |
| 2763 SharedFunctionInfo::kFormalParameterCountOffset)); | |
| 2764 Ldr(code_reg, | |
| 2765 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
| 2766 | |
| 2767 ParameterCount expected(expected_reg); | |
| 2768 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | |
| 2769 } | |
| 2770 | |
| 2771 | |
| 2772 void MacroAssembler::InvokeFunction(Register function, | |
| 2773 const ParameterCount& expected, | |
| 2774 const ParameterCount& actual, | |
| 2775 InvokeFlag flag, | |
| 2776 const CallWrapper& call_wrapper) { | |
| 2777 // You can't call a function without a valid frame. | |
| 2778 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
| 2779 | |
| 2780 // Contract with called JS functions requires that function is passed in x1. | |
| 2781 // (See FullCodeGenerator::Generate().) | |
| 2782 ASSERT(function.Is(x1)); | |
| 2783 | |
| 2784 Register code_reg = x3; | |
| 2785 | |
| 2786 // Set up the context. | |
| 2787 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); | |
| 2788 | |
| 2789 // We call indirectly through the code field in the function to | |
| 2790 // allow recompilation to take effect without changing any of the | |
| 2791 // call sites. | |
| 2792 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
| 2793 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | |
| 2794 } | |
| 2795 | |
| 2796 | |
| 2797 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | |
| 2798 const ParameterCount& expected, | |
| 2799 const ParameterCount& actual, | |
| 2800 InvokeFlag flag, | |
| 2801 const CallWrapper& call_wrapper) { | |
| 2802 // Contract with called JS functions requires that function is passed in x1. | |
| 2803 // (See FullCodeGenerator::Generate().) | |
| 2804 __ LoadObject(x1, function); | |
| 2805 InvokeFunction(x1, expected, actual, flag, call_wrapper); | |
| 2806 } | |
| 2807 | |
| 2808 | |
| 2809 void MacroAssembler::TryConvertDoubleToInt64(Register result, | |
| 2810 DoubleRegister double_input, | |
| 2811 Label* done) { | |
| 2812 // Try to convert with an FPU convert instruction. It's trivial to compute | |
| 2813 // the modulo operation on an integer register so we convert to a 64-bit | |
| 2814 // integer. | |
| 2815 // | |
| 2816 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) | |
| 2817   // when the double is out of range, including for infinities. NaNs are | |
| 2818   // converted to 0 (as ECMA-262 requires). | |
| 2819 Fcvtzs(result.X(), double_input); | |
| 2820 | |
| 2821   // The values INT64_MIN (0x800...00) and INT64_MAX (0x7ff...ff) are the | |
| 2822   // saturation values, so if the result is one of those we conservatively | |
| 2823   // assume that saturation occurred and handle the conversion manually. | |
| 2824 // | |
| 2825 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting | |
| 2826 // 1 will cause signed overflow. | |
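|        // Cmp(result, 1) overflows only for INT64_MIN, and the conditional | |
|        // Ccmp(result, -1) overflows only for INT64_MAX, so the V flag ends up | |
|        // set exactly when the result is one of the saturation values. | |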
| 2827 Cmp(result.X(), 1); | |
| 2828 Ccmp(result.X(), -1, VFlag, vc); | |
| 2829 | |
| 2830 B(vc, done); | |
| 2831 } | |
| 2832 | |
| 2833 | |
| 2834 void MacroAssembler::TruncateDoubleToI(Register result, | |
| 2835 DoubleRegister double_input) { | |
| 2836 Label done; | |
| 2837 ASSERT(jssp.Is(StackPointer())); | |
| 2838 | |
| 2839 // Try to convert the double to an int64. If successful, the bottom 32 bits | |
| 2840 // contain our truncated int32 result. | |
| 2841 TryConvertDoubleToInt64(result, double_input, &done); | |
| 2842 | |
| 2843   // If we fell through, the inline version didn't succeed; call the stub instead. | |
| 2844 Push(lr); | |
| 2845 Push(double_input); // Put input on stack. | |
| 2846 | |
| 2847 DoubleToIStub stub(jssp, | |
| 2848 result, | |
| 2849 0, | |
| 2850 true, // is_truncating | |
| 2851 true); // skip_fastpath | |
| 2852 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | |
| 2853 | |
| 2854 Drop(1, kDoubleSize); // Drop the double input on the stack. | |
| 2855 Pop(lr); | |
| 2856 | |
| 2857 Bind(&done); | |
| 2858 | |
| 2859 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
| 2860 // https://code.google.com/p/v8/issues/detail?id=3149 | |
| 2861 Sxtw(result, result.W()); | |
| 2862 } | |
| 2863 | |
| 2864 | |
| 2865 void MacroAssembler::TruncateHeapNumberToI(Register result, | |
| 2866 Register object) { | |
| 2867 Label done; | |
| 2868 ASSERT(!result.is(object)); | |
| 2869 ASSERT(jssp.Is(StackPointer())); | |
| 2870 | |
| 2871 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 2872 | |
| 2873 // Try to convert the double to an int64. If successful, the bottom 32 bits | |
| 2874 // contain our truncated int32 result. | |
| 2875 TryConvertDoubleToInt64(result, fp_scratch, &done); | |
| 2876 | |
| 2877   // If we fell through, the inline version didn't succeed; call the stub instead. | |
| 2878 Push(lr); | |
| 2879 DoubleToIStub stub(object, | |
| 2880 result, | |
| 2881 HeapNumber::kValueOffset - kHeapObjectTag, | |
| 2882 true, // is_truncating | |
| 2883 true); // skip_fastpath | |
| 2884 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | |
| 2885 Pop(lr); | |
| 2886 | |
| 2887 Bind(&done); | |
| 2888 | |
| 2889 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
| 2890 // https://code.google.com/p/v8/issues/detail?id=3149 | |
| 2891 Sxtw(result, result.W()); | |
| 2892 } | |
| 2893 | |
| 2894 | |
| 2895 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | |
| 2896 if (frame_mode == BUILD_STUB_FRAME) { | |
| 2897 ASSERT(StackPointer().Is(jssp)); | |
| 2898 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already | |
| 2899 // have the special STUB smi? | |
| 2900 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); | |
| 2901 // Compiled stubs don't age, and so they don't need the predictable code | |
| 2902 // ageing sequence. | |
| 2903 __ Push(lr, fp, cp, Tmp0()); | |
| 2904 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | |
| 2905 } else { | |
| 2906 if (isolate()->IsCodePreAgingActive()) { | |
| 2907 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | |
| 2908 __ EmitCodeAgeSequence(stub); | |
| 2909 } else { | |
| 2910 __ EmitFrameSetupForCodeAgePatching(); | |
| 2911 } | |
| 2912 } | |
| 2913 } | |
| 2914 | |
| 2915 | |
| 2916 void MacroAssembler::EnterFrame(StackFrame::Type type) { | |
| 2917 ASSERT(jssp.Is(StackPointer())); | |
| 2918 Push(lr, fp, cp); | |
| 2919 Mov(Tmp1(), Operand(Smi::FromInt(type))); | |
| 2920 Mov(Tmp0(), Operand(CodeObject())); | |
| 2921 Push(Tmp1(), Tmp0()); | |
| 2922 // jssp[4] : lr | |
| 2923 // jssp[3] : fp | |
| 2924 // jssp[2] : cp | |
| 2925 // jssp[1] : type | |
| 2926 // jssp[0] : code object | |
| 2927 | |
| 2928 // Adjust FP to point to saved FP. | |
| 2929 add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); | |
| 2930 } | |
| 2931 | |
| 2932 | |
| 2933 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | |
| 2934 ASSERT(jssp.Is(StackPointer())); | |
| 2935 // Drop the execution stack down to the frame pointer and restore | |
| 2936 // the caller frame pointer and return address. | |
| 2937 Mov(jssp, fp); | |
| 2938 AssertStackConsistency(); | |
| 2939 Pop(fp, lr); | |
| 2940 } | |
| 2941 | |
| 2942 | |
| 2943 void MacroAssembler::ExitFramePreserveFPRegs() { | |
| 2944 PushCPURegList(kCallerSavedFP); | |
| 2945 } | |
| 2946 | |
| 2947 | |
| 2948 void MacroAssembler::ExitFrameRestoreFPRegs() { | |
| 2949 // Read the registers from the stack without popping them. The stack pointer | |
| 2950 // will be reset as part of the unwinding process. | |
| 2951 CPURegList saved_fp_regs = kCallerSavedFP; | |
| 2952 ASSERT(saved_fp_regs.Count() % 2 == 0); | |
| 2953 | |
| 2954 int offset = ExitFrameConstants::kLastExitFrameField; | |
| 2955 while (!saved_fp_regs.IsEmpty()) { | |
| 2956 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); | |
| 2957 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); | |
| 2958 offset -= 2 * kDRegSizeInBytes; | |
| 2959 Ldp(dst1, dst0, MemOperand(fp, offset)); | |
| 2960 } | |
| 2961 } | |
| 2962 | |
| 2963 | |
| 2964 // TODO(jbramley): Check that we're handling the frame pointer correctly. | |
| 2965 void MacroAssembler::EnterExitFrame(bool save_doubles, | |
| 2966 const Register& scratch, | |
| 2967 int extra_space) { | |
| 2968 ASSERT(jssp.Is(StackPointer())); | |
| 2969 | |
| 2970 // Set up the new stack frame. | |
| 2971 Mov(scratch, Operand(CodeObject())); | |
| 2972 Push(lr, fp); | |
| 2973 Mov(fp, StackPointer()); | |
| 2974 Push(xzr, scratch); | |
| 2975 // fp[8]: CallerPC (lr) | |
| 2976 // fp -> fp[0]: CallerFP (old fp) | |
| 2977 // fp[-8]: Space reserved for SPOffset. | |
| 2978 // jssp -> fp[-16]: CodeObject() | |
| 2979 STATIC_ASSERT((2 * kPointerSize) == | |
| 2980 ExitFrameConstants::kCallerSPDisplacement); | |
| 2981 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset); | |
| 2982 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset); | |
| 2983 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset); | |
| 2984 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset); | |
| 2985 | |
| 2986 // Save the frame pointer and context pointer in the top frame. | |
| 2987 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, | |
| 2988 isolate()))); | |
| 2989 Str(fp, MemOperand(scratch)); | |
| 2990 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
| 2991 isolate()))); | |
| 2992 Str(cp, MemOperand(scratch)); | |
| 2993 | |
| 2994 STATIC_ASSERT((-2 * kPointerSize) == | |
| 2995 ExitFrameConstants::kLastExitFrameField); | |
| 2996 if (save_doubles) { | |
| 2997 ExitFramePreserveFPRegs(); | |
| 2998 } | |
| 2999 | |
| 3000 // Reserve space for the return address and for user requested memory. | |
| 3001 // We do this before aligning to make sure that we end up correctly | |
| 3002 // aligned with the minimum of wasted space. | |
| 3003 Claim(extra_space + 1, kXRegSizeInBytes); | |
| 3004 // fp[8]: CallerPC (lr) | |
| 3005 // fp -> fp[0]: CallerFP (old fp) | |
| 3006 // fp[-8]: Space reserved for SPOffset. | |
| 3007 // fp[-16]: CodeObject() | |
| 3008 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true). | |
| 3009 // jssp[8]: Extra space reserved for caller (if extra_space != 0). | |
| 3010 // jssp -> jssp[0]: Space reserved for the return address. | |
| 3011 | |
| 3012 // Align and synchronize the system stack pointer with jssp. | |
| 3013 AlignAndSetCSPForFrame(); | |
| 3014 ASSERT(csp.Is(StackPointer())); | |
| 3015 | |
| 3016 // fp[8]: CallerPC (lr) | |
| 3017 // fp -> fp[0]: CallerFP (old fp) | |
| 3018 // fp[-8]: Space reserved for SPOffset. | |
| 3019 // fp[-16]: CodeObject() | |
| 3020 // csp[...]: Saved doubles, if saved_doubles is true. | |
| 3021 // csp[8]: Memory reserved for the caller if extra_space != 0. | |
| 3022 // Alignment padding, if necessary. | |
| 3023 // csp -> csp[0]: Space reserved for the return address. | |
| 3024 | |
| 3025 // ExitFrame::GetStateForFramePointer expects to find the return address at | |
| 3026 // the memory address immediately below the pointer stored in SPOffset. | |
| 3027 // It is not safe to derive much else from SPOffset, because the size of the | |
| 3028 // padding can vary. | |
| 3029 Add(scratch, csp, kXRegSizeInBytes); | |
| 3030 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); | |
| 3031 } | |
| 3032 | |
| 3033 | |
| 3034 // Leave the current exit frame. | |
| 3035 void MacroAssembler::LeaveExitFrame(bool restore_doubles, | |
| 3036 const Register& scratch, | |
| 3037 bool restore_context) { | |
| 3038 ASSERT(csp.Is(StackPointer())); | |
| 3039 | |
| 3040 if (restore_doubles) { | |
| 3041 ExitFrameRestoreFPRegs(); | |
| 3042 } | |
| 3043 | |
| 3044 // Restore the context pointer from the top frame. | |
| 3045 if (restore_context) { | |
| 3046 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
| 3047 isolate()))); | |
| 3048 Ldr(cp, MemOperand(scratch)); | |
| 3049 } | |
| 3050 | |
| 3051 if (emit_debug_code()) { | |
| 3052 // Also emit debug code to clear the cp in the top frame. | |
| 3053 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
| 3054 isolate()))); | |
| 3055 Str(xzr, MemOperand(scratch)); | |
| 3056 } | |
| 3057 // Clear the frame pointer from the top frame. | |
| 3058 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, | |
| 3059 isolate()))); | |
| 3060 Str(xzr, MemOperand(scratch)); | |
| 3061 | |
| 3062 // Pop the exit frame. | |
| 3063 // fp[8]: CallerPC (lr) | |
| 3064 // fp -> fp[0]: CallerFP (old fp) | |
| 3065 // fp[...]: The rest of the frame. | |
| 3066 Mov(jssp, fp); | |
| 3067 SetStackPointer(jssp); | |
| 3068 AssertStackConsistency(); | |
| 3069 Pop(fp, lr); | |
| 3070 } | |
| 3071 | |
| 3072 | |
| 3073 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | |
| 3074 Register scratch1, Register scratch2) { | |
| 3075 if (FLAG_native_code_counters && counter->Enabled()) { | |
| 3076 Mov(scratch1, value); | |
| 3077 Mov(scratch2, Operand(ExternalReference(counter))); | |
| 3078 Str(scratch1, MemOperand(scratch2)); | |
| 3079 } | |
| 3080 } | |
| 3081 | |
| 3082 | |
| 3083 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | |
| 3084 Register scratch1, Register scratch2) { | |
| 3085 ASSERT(value != 0); | |
| 3086 if (FLAG_native_code_counters && counter->Enabled()) { | |
| 3087 Mov(scratch2, Operand(ExternalReference(counter))); | |
| 3088 Ldr(scratch1, MemOperand(scratch2)); | |
| 3089 Add(scratch1, scratch1, value); | |
| 3090 Str(scratch1, MemOperand(scratch2)); | |
| 3091 } | |
| 3092 } | |
| 3093 | |
| 3094 | |
| 3095 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | |
| 3096 Register scratch1, Register scratch2) { | |
| 3097 IncrementCounter(counter, -value, scratch1, scratch2); | |
| 3098 } | |
| 3099 | |
| 3100 | |
| 3101 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | |
| 3102 if (context_chain_length > 0) { | |
| 3103 // Move up the chain of contexts to the context containing the slot. | |
| 3104 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | |
| 3105 for (int i = 1; i < context_chain_length; i++) { | |
| 3106 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | |
| 3107 } | |
| 3108 } else { | |
| 3109 // Slot is in the current function context. Move it into the | |
| 3110 // destination register in case we store into it (the write barrier | |
| 3111 // cannot be allowed to destroy the context in cp). | |
| 3112 Mov(dst, cp); | |
| 3113 } | |
| 3114 } | |
| 3115 | |
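| // For reference, the context walk above is equivalent to this sketch, where | |
| // previous() stands for the load of the Context::PREVIOUS_INDEX slot | |
| // (illustrative only, not used by the generated code): | |
| // | |
| //   Context* ctx = cp; | |
| //   for (int i = 0; i < context_chain_length; i++) { | |
| //     ctx = ctx->previous(); | |
| //   } | |
| //   dst = ctx;  // For a chain length of 0, dst is simply the current context. | |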
| 3116 | |
| 3117 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 3118 void MacroAssembler::DebugBreak() { | |
| 3119 Mov(x0, 0); | |
| 3120 Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | |
| 3121 CEntryStub ces(1); | |
| 3122 ASSERT(AllowThisStubCall(&ces)); | |
| 3123 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); | |
| 3124 } | |
| 3125 #endif | |
| 3126 | |
| 3127 | |
| 3128 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | |
| 3129 int handler_index) { | |
| 3130 ASSERT(jssp.Is(StackPointer())); | |
| 3131 // Adjust this code if the asserts don't hold. | |
| 3132 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
| 3133 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | |
| 3134 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
| 3135 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
| 3136 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
| 3137 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
| 3138 | |
| 3139 // For the JSEntry handler, we must preserve the live registers x0-x4. | |
| 3140 // (See JSEntryStub::GenerateBody().) | |
| 3141 | |
| 3142 unsigned state = | |
| 3143 StackHandler::IndexField::encode(handler_index) | | |
| 3144 StackHandler::KindField::encode(kind); | |
| 3145 | |
| 3146 // Set up the code object and the state for pushing. | |
| 3147 Mov(x10, Operand(CodeObject())); | |
| 3148 Mov(x11, state); | |
| 3149 | |
| 3150 // Push the frame pointer, context, state, and code object. | |
| 3151 if (kind == StackHandler::JS_ENTRY) { | |
| 3152 ASSERT(Smi::FromInt(0) == 0); | |
| 3153 Push(xzr, xzr, x11, x10); | |
| 3154 } else { | |
| 3155 Push(fp, cp, x11, x10); | |
| 3156 } | |
| 3157 | |
| 3158 // Link the current handler as the next handler. | |
| 3159 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | |
| 3160 Ldr(x10, MemOperand(x11)); | |
| 3161 Push(x10); | |
| 3162 // Set this new handler as the current one. | |
| 3163 Str(jssp, MemOperand(x11)); | |
| 3164 } | |
| 3165 | |
| 3166 | |
| 3167 void MacroAssembler::PopTryHandler() { | |
| 3168 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 3169 Pop(x10); | |
| 3170 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | |
| 3171 Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes); | |
| 3172 Str(x10, MemOperand(x11)); | |
| 3173 } | |
| 3174 | |
| 3175 | |
| 3176 void MacroAssembler::Allocate(int object_size, | |
| 3177 Register result, | |
| 3178 Register scratch1, | |
| 3179 Register scratch2, | |
| 3180 Label* gc_required, | |
| 3181 AllocationFlags flags) { | |
| 3182 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | |
| 3183 if (!FLAG_inline_new) { | |
| 3184 if (emit_debug_code()) { | |
| 3185 // Trash the registers to simulate an allocation failure. | |
| 3186 // We apply salt to the original zap value to easily spot the values. | |
| 3187 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | |
| 3188 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | |
| 3189 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | |
| 3190 } | |
| 3191 B(gc_required); | |
| 3192 return; | |
| 3193 } | |
| 3194 | |
| 3195 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1())); | |
| 3196 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() && | |
| 3197 Tmp0().Is64Bits() && Tmp1().Is64Bits()); | |
| 3198 | |
| 3199 // Make object size into bytes. | |
| 3200 if ((flags & SIZE_IN_WORDS) != 0) { | |
| 3201 object_size *= kPointerSize; | |
| 3202 } | |
| 3203 ASSERT(0 == (object_size & kObjectAlignmentMask)); | |
| 3204 | |
| 3205 // Check relative positions of allocation top and limit addresses. | |
| 3206 // The values must be adjacent in memory to allow the use of LDP. | |
| 3207 ExternalReference heap_allocation_top = | |
| 3208 AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
| 3209 ExternalReference heap_allocation_limit = | |
| 3210 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | |
| 3211 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | |
| 3212 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | |
| 3213 ASSERT((limit - top) == kPointerSize); | |
| 3214 | |
| 3215 // Set up allocation top address and object size registers. | |
| 3216 Register top_address = scratch1; | |
| 3217 Register allocation_limit = scratch2; | |
| 3218 Mov(top_address, Operand(heap_allocation_top)); | |
| 3219 | |
| 3220 if ((flags & RESULT_CONTAINS_TOP) == 0) { | |
| 3221 // Load allocation top into result and the allocation limit. | |
| 3222 Ldp(result, allocation_limit, MemOperand(top_address)); | |
| 3223 } else { | |
| 3224 if (emit_debug_code()) { | |
| 3225 // Assert that result actually contains top on entry. | |
| 3226 Ldr(Tmp0(), MemOperand(top_address)); | |
| 3227 Cmp(result, Tmp0()); | |
| 3228 Check(eq, kUnexpectedAllocationTop); | |
| 3229 } | |
| 3230 // Load the allocation limit. 'result' already contains the allocation top. | |
| 3231 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | |
| 3232 } | |
| 3233 | |
| 3234 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | |
| 3235 // the same alignment on A64. | |
| 3236 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | |
| 3237 | |
| 3238 // Calculate new top and bail out if new space is exhausted. | |
| 3239 Adds(Tmp1(), result, object_size); | |
| 3240 B(vs, gc_required); | |
| 3241 Cmp(Tmp1(), allocation_limit); | |
| 3242 B(hi, gc_required); | |
| 3243 Str(Tmp1(), MemOperand(top_address)); | |
| 3244 | |
| 3245 // Tag the object if requested. | |
| 3246 if ((flags & TAG_OBJECT) != 0) { | |
| 3247 Orr(result, result, kHeapObjectTag); | |
| 3248 } | |
| 3249 } | |
| 3250 | |
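| // The fast path above is a bump allocation. A C++ sketch of the same logic | |
| // (illustrative only; 'top' and 'limit' are the adjacent allocation fields | |
| // loaded with Ldp): | |
| // | |
| //   uint64_t new_top = top + object_size; | |
| //   if (overflowed || (new_top > limit)) goto gc_required;  // B(vs); B(hi). | |
| //   *top_address = new_top;                                 // Publish new top. | |
| //   result = top | kHeapObjectTag;                          // If TAG_OBJECT. | |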
| 3251 | |
| 3252 void MacroAssembler::Allocate(Register object_size, | |
| 3253 Register result, | |
| 3254 Register scratch1, | |
| 3255 Register scratch2, | |
| 3256 Label* gc_required, | |
| 3257 AllocationFlags flags) { | |
| 3258 if (!FLAG_inline_new) { | |
| 3259 if (emit_debug_code()) { | |
| 3260 // Trash the registers to simulate an allocation failure. | |
| 3261 // We apply salt to the original zap value to easily spot the values. | |
| 3262 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | |
| 3263 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | |
| 3264 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | |
| 3265 } | |
| 3266 B(gc_required); | |
| 3267 return; | |
| 3268 } | |
| 3269 | |
| 3270 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1())); | |
| 3271 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && | |
| 3272 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits()); | |
| 3273 | |
| 3274 // Check relative positions of allocation top and limit addresses. | |
| 3275 // The values must be adjacent in memory to allow the use of LDP. | |
| 3276 ExternalReference heap_allocation_top = | |
| 3277 AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
| 3278 ExternalReference heap_allocation_limit = | |
| 3279 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | |
| 3280 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | |
| 3281 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | |
| 3282 ASSERT((limit - top) == kPointerSize); | |
| 3283 | |
| 3284 // Set up allocation top address and object size registers. | |
| 3285 Register top_address = scratch1; | |
| 3286 Register allocation_limit = scratch2; | |
| 3287 Mov(top_address, Operand(heap_allocation_top)); | |
| 3288 | |
| 3289 if ((flags & RESULT_CONTAINS_TOP) == 0) { | |
| 3290 // Load allocation top into result and the allocation limit. | |
| 3291 Ldp(result, allocation_limit, MemOperand(top_address)); | |
| 3292 } else { | |
| 3293 if (emit_debug_code()) { | |
| 3294 // Assert that result actually contains top on entry. | |
| 3295 Ldr(Tmp0(), MemOperand(top_address)); | |
| 3296 Cmp(result, Tmp0()); | |
| 3297 Check(eq, kUnexpectedAllocationTop); | |
| 3298 } | |
| 3299 // Load the allocation limit. 'result' already contains the allocation top. | |
| 3300 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | |
| 3301 } | |
| 3302 | |
| 3303 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | |
| 3304 // the same alignment on A64. | |
| 3305 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | |
| 3306 | |
| 3307 // Calculate new top and bail out if new space is exhausted | |
| 3308 if ((flags & SIZE_IN_WORDS) != 0) { | |
| 3309 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2)); | |
| 3310 } else { | |
| 3311 Adds(Tmp1(), result, object_size); | |
| 3312 } | |
| 3313 | |
| 3314 if (emit_debug_code()) { | |
| 3315 Tst(Tmp1(), kObjectAlignmentMask); | |
| 3316 Check(eq, kUnalignedAllocationInNewSpace); | |
| 3317 } | |
| 3318 | |
| 3319 B(vs, gc_required); | |
| 3320 Cmp(Tmp1(), allocation_limit); | |
| 3321 B(hi, gc_required); | |
| 3322 Str(Tmp1(), MemOperand(top_address)); | |
| 3323 | |
| 3324 // Tag the object if requested. | |
| 3325 if ((flags & TAG_OBJECT) != 0) { | |
| 3326 Orr(result, result, kHeapObjectTag); | |
| 3327 } | |
| 3328 } | |
| 3329 | |
| 3330 | |
| 3331 void MacroAssembler::UndoAllocationInNewSpace(Register object, | |
| 3332 Register scratch) { | |
| 3333 ExternalReference new_space_allocation_top = | |
| 3334 ExternalReference::new_space_allocation_top_address(isolate()); | |
| 3335 | |
| 3336 // Make sure the object has no tag before resetting top. | |
| 3337 Bic(object, object, kHeapObjectTagMask); | |
| 3338 #ifdef DEBUG | |
| 3339 // Check that the object un-allocated is below the current top. | |
| 3340 Mov(scratch, Operand(new_space_allocation_top)); | |
| 3341 Ldr(scratch, MemOperand(scratch)); | |
| 3342 Cmp(object, scratch); | |
| 3343 Check(lt, kUndoAllocationOfNonAllocatedMemory); | |
| 3344 #endif | |
| 3345 // Write the address of the object to un-allocate as the current top. | |
| 3346 Mov(scratch, Operand(new_space_allocation_top)); | |
| 3347 Str(object, MemOperand(scratch)); | |
| 3348 } | |
| 3349 | |
| 3350 | |
| 3351 void MacroAssembler::AllocateTwoByteString(Register result, | |
| 3352 Register length, | |
| 3353 Register scratch1, | |
| 3354 Register scratch2, | |
| 3355 Register scratch3, | |
| 3356 Label* gc_required) { | |
| 3357 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); | |
| 3358 // Calculate the number of bytes needed for the characters in the string while | |
| 3359 // observing object alignment. | |
| 3360 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
| 3361 Add(scratch1, length, length); // Length in bytes, not chars. | |
| 3362 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); | |
| 3363 Bic(scratch1, scratch1, kObjectAlignmentMask); | |
| 3364 | |
| 3365 // Allocate two-byte string in new space. | |
| 3366 Allocate(scratch1, | |
| 3367 result, | |
| 3368 scratch2, | |
| 3369 scratch3, | |
| 3370 gc_required, | |
| 3371 TAG_OBJECT); | |
| 3372 | |
| 3373 // Set the map, length and hash field. | |
| 3374 InitializeNewString(result, | |
| 3375 length, | |
| 3376 Heap::kStringMapRootIndex, | |
| 3377 scratch1, | |
| 3378 scratch2); | |
| 3379 } | |
| 3380 | |
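| // The size computation above rounds the two-byte payload plus header up to | |
| // the object alignment. Equivalent sketch (illustrative only): | |
| // | |
| //   size = (SeqTwoByteString::kHeaderSize + 2 * length + kObjectAlignmentMask) | |
| //          & ~kObjectAlignmentMask; | |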
| 3381 | |
| 3382 void MacroAssembler::AllocateAsciiString(Register result, | |
| 3383 Register length, | |
| 3384 Register scratch1, | |
| 3385 Register scratch2, | |
| 3386 Register scratch3, | |
| 3387 Label* gc_required) { | |
| 3388 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); | |
| 3389 // Calculate the number of bytes needed for the characters in the string while | |
| 3390 // observing object alignment. | |
| 3391 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
| 3392 STATIC_ASSERT(kCharSize == 1); | |
| 3393 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); | |
| 3394 Bic(scratch1, scratch1, kObjectAlignmentMask); | |
| 3395 | |
| 3396 // Allocate ASCII string in new space. | |
| 3397 Allocate(scratch1, | |
| 3398 result, | |
| 3399 scratch2, | |
| 3400 scratch3, | |
| 3401 gc_required, | |
| 3402 TAG_OBJECT); | |
| 3403 | |
| 3404 // Set the map, length and hash field. | |
| 3405 InitializeNewString(result, | |
| 3406 length, | |
| 3407 Heap::kAsciiStringMapRootIndex, | |
| 3408 scratch1, | |
| 3409 scratch2); | |
| 3410 } | |
| 3411 | |
| 3412 | |
| 3413 void MacroAssembler::AllocateTwoByteConsString(Register result, | |
| 3414 Register length, | |
| 3415 Register scratch1, | |
| 3416 Register scratch2, | |
| 3417 Label* gc_required) { | |
| 3418 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, | |
| 3419 TAG_OBJECT); | |
| 3420 | |
| 3421 InitializeNewString(result, | |
| 3422 length, | |
| 3423 Heap::kConsStringMapRootIndex, | |
| 3424 scratch1, | |
| 3425 scratch2); | |
| 3426 } | |
| 3427 | |
| 3428 | |
| 3429 void MacroAssembler::AllocateAsciiConsString(Register result, | |
| 3430 Register length, | |
| 3431 Register scratch1, | |
| 3432 Register scratch2, | |
| 3433 Label* gc_required) { | |
| 3434 Label allocate_new_space, install_map; | |
| 3435 AllocationFlags flags = TAG_OBJECT; | |
| 3436 | |
| 3437 ExternalReference high_promotion_mode = ExternalReference:: | |
| 3438 new_space_high_promotion_mode_active_address(isolate()); | |
| 3439 Mov(scratch1, Operand(high_promotion_mode)); | |
| 3440 Ldr(scratch1, MemOperand(scratch1)); | |
| 3441 Cbz(scratch1, &allocate_new_space); | |
| 3442 | |
| 3443 Allocate(ConsString::kSize, | |
| 3444 result, | |
| 3445 scratch1, | |
| 3446 scratch2, | |
| 3447 gc_required, | |
| 3448 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); | |
| 3449 | |
| 3450 B(&install_map); | |
| 3451 | |
| 3452 Bind(&allocate_new_space); | |
| 3453 Allocate(ConsString::kSize, | |
| 3454 result, | |
| 3455 scratch1, | |
| 3456 scratch2, | |
| 3457 gc_required, | |
| 3458 flags); | |
| 3459 | |
| 3460 Bind(&install_map); | |
| 3461 | |
| 3462 InitializeNewString(result, | |
| 3463 length, | |
| 3464 Heap::kConsAsciiStringMapRootIndex, | |
| 3465 scratch1, | |
| 3466 scratch2); | |
| 3467 } | |
| 3468 | |
| 3469 | |
| 3470 void MacroAssembler::AllocateTwoByteSlicedString(Register result, | |
| 3471 Register length, | |
| 3472 Register scratch1, | |
| 3473 Register scratch2, | |
| 3474 Label* gc_required) { | |
| 3475 ASSERT(!AreAliased(result, length, scratch1, scratch2)); | |
| 3476 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | |
| 3477 TAG_OBJECT); | |
| 3478 | |
| 3479 InitializeNewString(result, | |
| 3480 length, | |
| 3481 Heap::kSlicedStringMapRootIndex, | |
| 3482 scratch1, | |
| 3483 scratch2); | |
| 3484 } | |
| 3485 | |
| 3486 | |
| 3487 void MacroAssembler::AllocateAsciiSlicedString(Register result, | |
| 3488 Register length, | |
| 3489 Register scratch1, | |
| 3490 Register scratch2, | |
| 3491 Label* gc_required) { | |
| 3492 ASSERT(!AreAliased(result, length, scratch1, scratch2)); | |
| 3493 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | |
| 3494 TAG_OBJECT); | |
| 3495 | |
| 3496 InitializeNewString(result, | |
| 3497 length, | |
| 3498 Heap::kSlicedAsciiStringMapRootIndex, | |
| 3499 scratch1, | |
| 3500 scratch2); | |
| 3501 } | |
| 3502 | |
| 3503 | |
| 3504 // Allocates a heap number or jumps to the gc_required label if the young | |
| 3505 // space is full and a scavenge is needed. | |
| 3506 void MacroAssembler::AllocateHeapNumber(Register result, | |
| 3507 Label* gc_required, | |
| 3508 Register scratch1, | |
| 3509 Register scratch2, | |
| 3510 Register heap_number_map) { | |
| 3511 // Allocate an object in the heap for the heap number and tag it as a heap | |
| 3512 // object. | |
| 3513 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, | |
| 3514 TAG_OBJECT); | |
| 3515 | |
| 3516 // Store heap number map in the allocated object. | |
| 3517 if (heap_number_map.Is(NoReg)) { | |
| 3518 heap_number_map = scratch1; | |
| 3519 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 3520 } | |
| 3521 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 3522 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | |
| 3523 } | |
| 3524 | |
| 3525 | |
| 3526 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | |
| 3527 DoubleRegister value, | |
| 3528 Label* gc_required, | |
| 3529 Register scratch1, | |
| 3530 Register scratch2, | |
| 3531 Register heap_number_map) { | |
| 3532 // TODO(all): Check if it would be more efficient to use STP to store both | |
| 3533 // the map and the value. | |
| 3534 AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map); | |
| 3535 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
| 3536 } | |
| 3537 | |
| 3538 | |
| 3539 void MacroAssembler::JumpIfObjectType(Register object, | |
| 3540 Register map, | |
| 3541 Register type_reg, | |
| 3542 InstanceType type, | |
| 3543 Label* if_cond_pass, | |
| 3544 Condition cond) { | |
| 3545 CompareObjectType(object, map, type_reg, type); | |
| 3546 B(cond, if_cond_pass); | |
| 3547 } | |
| 3548 | |
| 3549 | |
| 3550 void MacroAssembler::JumpIfNotObjectType(Register object, | |
| 3551 Register map, | |
| 3552 Register type_reg, | |
| 3553 InstanceType type, | |
| 3554 Label* if_not_object) { | |
| 3555 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne); | |
| 3556 } | |
| 3557 | |
| 3558 | |
| 3559 // Sets condition flags based on comparison, and returns type in type_reg. | |
| 3560 void MacroAssembler::CompareObjectType(Register object, | |
| 3561 Register map, | |
| 3562 Register type_reg, | |
| 3563 InstanceType type) { | |
| 3564 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 3565 CompareInstanceType(map, type_reg, type); | |
| 3566 } | |
| 3567 | |
| 3568 | |
| 3569 // Sets condition flags based on comparison, and returns type in type_reg. | |
| 3570 void MacroAssembler::CompareInstanceType(Register map, | |
| 3571 Register type_reg, | |
| 3572 InstanceType type) { | |
| 3573 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
| 3574 Cmp(type_reg, type); | |
| 3575 } | |
| 3576 | |
| 3577 | |
| 3578 void MacroAssembler::CompareMap(Register obj, | |
| 3579 Register scratch, | |
| 3580 Handle<Map> map) { | |
| 3581 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
| 3582 CompareMap(scratch, map); | |
| 3583 } | |
| 3584 | |
| 3585 | |
| 3586 void MacroAssembler::CompareMap(Register obj_map, | |
| 3587 Handle<Map> map) { | |
| 3588 Cmp(obj_map, Operand(map)); | |
| 3589 } | |
| 3590 | |
| 3591 | |
| 3592 void MacroAssembler::CheckMap(Register obj, | |
| 3593 Register scratch, | |
| 3594 Handle<Map> map, | |
| 3595 Label* fail, | |
| 3596 SmiCheckType smi_check_type) { | |
| 3597 if (smi_check_type == DO_SMI_CHECK) { | |
| 3598 JumpIfSmi(obj, fail); | |
| 3599 } | |
| 3600 | |
| 3601 CompareMap(obj, scratch, map); | |
| 3602 B(ne, fail); | |
| 3603 } | |
| 3604 | |
| 3605 | |
| 3606 void MacroAssembler::CheckMap(Register obj, | |
| 3607 Register scratch, | |
| 3608 Heap::RootListIndex index, | |
| 3609 Label* fail, | |
| 3610 SmiCheckType smi_check_type) { | |
| 3611 if (smi_check_type == DO_SMI_CHECK) { | |
| 3612 JumpIfSmi(obj, fail); | |
| 3613 } | |
| 3614 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
| 3615 JumpIfNotRoot(scratch, index, fail); | |
| 3616 } | |
| 3617 | |
| 3618 | |
| 3619 void MacroAssembler::CheckMap(Register obj_map, | |
| 3620 Handle<Map> map, | |
| 3621 Label* fail, | |
| 3622 SmiCheckType smi_check_type) { | |
| 3623 if (smi_check_type == DO_SMI_CHECK) { | |
| 3624 JumpIfSmi(obj_map, fail); | |
| 3625 } | |
| 3626 | |
| 3627 CompareMap(obj_map, map); | |
| 3628 B(ne, fail); | |
| 3629 } | |
| 3630 | |
| 3631 | |
| 3632 void MacroAssembler::DispatchMap(Register obj, | |
| 3633 Register scratch, | |
| 3634 Handle<Map> map, | |
| 3635 Handle<Code> success, | |
| 3636 SmiCheckType smi_check_type) { | |
| 3637 Label fail; | |
| 3638 if (smi_check_type == DO_SMI_CHECK) { | |
| 3639 JumpIfSmi(obj, &fail); | |
| 3640 } | |
| 3641 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
| 3642 Cmp(scratch, Operand(map)); | |
| 3643 B(ne, &fail); | |
| 3644 Jump(success, RelocInfo::CODE_TARGET); | |
| 3645 Bind(&fail); | |
| 3646 } | |
| 3647 | |
| 3648 | |
| 3649 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { | |
| 3650 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 3651 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset)); | |
| 3652 Tst(Tmp0(), mask); | |
| 3653 } | |
| 3654 | |
| 3655 | |
| 3656 void MacroAssembler::LoadElementsKind(Register result, Register object) { | |
| 3657 // Load map. | |
| 3658 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 3659 // Load the map's "bit field 2". | |
| 3660 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); | |
| 3661 // Retrieve elements_kind from bit field 2. | |
| 3662 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | |
| 3663 } | |
| 3664 | |
| 3665 | |
| 3666 void MacroAssembler::TryGetFunctionPrototype(Register function, | |
| 3667 Register result, | |
| 3668 Register scratch, | |
| 3669 Label* miss, | |
| 3670 BoundFunctionAction action) { | |
| 3671 ASSERT(!AreAliased(function, result, scratch)); | |
| 3672 | |
| 3673 // Check that the receiver isn't a smi. | |
| 3674 JumpIfSmi(function, miss); | |
| 3675 | |
| 3676 // Check that the function really is a function. Load map into result reg. | |
| 3677 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); | |
| 3678 | |
| 3679 if (action == kMissOnBoundFunction) { | |
| 3680 Register scratch_w = scratch.W(); | |
| 3681 Ldr(scratch, | |
| 3682 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | |
| 3683 // On 64-bit platforms, the compiler hints field is not a smi. See the | |
| 3684 // definition of kCompilerHintsOffset in src/objects.h. | |
| 3685 Ldr(scratch_w, | |
| 3686 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | |
| 3687 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss); | |
| 3688 } | |
| 3689 | |
| 3690 // Make sure that the function has an instance prototype. | |
| 3691 Label non_instance; | |
| 3692 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | |
| 3693 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance); | |
| 3694 | |
| 3695 // Get the prototype or initial map from the function. | |
| 3696 Ldr(result, | |
| 3697 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | |
| 3698 | |
| 3699 // If the prototype or initial map is the hole, don't return it and simply | |
| 3700 // miss the cache instead. This will allow us to allocate a prototype object | |
| 3701 // on-demand in the runtime system. | |
| 3702 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss); | |
| 3703 | |
| 3704 // If the function does not have an initial map, we're done. | |
| 3705 Label done; | |
| 3706 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done); | |
| 3707 | |
| 3708 // Get the prototype from the initial map. | |
| 3709 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | |
| 3710 B(&done); | |
| 3711 | |
| 3712 // Non-instance prototype: fetch prototype from constructor field in initial | |
| 3713 // map. | |
| 3714 Bind(&non_instance); | |
| 3715 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | |
| 3716 | |
| 3717 // All done. | |
| 3718 Bind(&done); | |
| 3719 } | |
| 3720 | |
| 3721 | |
| 3722 void MacroAssembler::CompareRoot(const Register& obj, | |
| 3723 Heap::RootListIndex index) { | |
| 3724 ASSERT(!AreAliased(obj, Tmp0())); | |
| 3725 LoadRoot(Tmp0(), index); | |
| 3726 Cmp(obj, Tmp0()); | |
| 3727 } | |
| 3728 | |
| 3729 | |
| 3730 void MacroAssembler::JumpIfRoot(const Register& obj, | |
| 3731 Heap::RootListIndex index, | |
| 3732 Label* if_equal) { | |
| 3733 CompareRoot(obj, index); | |
| 3734 B(eq, if_equal); | |
| 3735 } | |
| 3736 | |
| 3737 | |
| 3738 void MacroAssembler::JumpIfNotRoot(const Register& obj, | |
| 3739 Heap::RootListIndex index, | |
| 3740 Label* if_not_equal) { | |
| 3741 CompareRoot(obj, index); | |
| 3742 B(ne, if_not_equal); | |
| 3743 } | |
| 3744 | |
| 3745 | |
| 3746 void MacroAssembler::CompareAndSplit(const Register& lhs, | |
| 3747 const Operand& rhs, | |
| 3748 Condition cond, | |
| 3749 Label* if_true, | |
| 3750 Label* if_false, | |
| 3751 Label* fall_through) { | |
| 3752 if ((if_true == if_false) && (if_false == fall_through)) { | |
| 3753 // Fall through. | |
| 3754 } else if (if_true == if_false) { | |
| 3755 B(if_true); | |
| 3756 } else if (if_false == fall_through) { | |
| 3757 CompareAndBranch(lhs, rhs, cond, if_true); | |
| 3758 } else if (if_true == fall_through) { | |
| 3759 CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false); | |
| 3760 } else { | |
| 3761 CompareAndBranch(lhs, rhs, cond, if_true); | |
| 3762 B(if_false); | |
| 3763 } | |
| 3764 } | |
| 3765 | |
| 3766 | |
| 3767 void MacroAssembler::TestAndSplit(const Register& reg, | |
| 3768 uint64_t bit_pattern, | |
| 3769 Label* if_all_clear, | |
| 3770 Label* if_any_set, | |
| 3771 Label* fall_through) { | |
| 3772 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) { | |
| 3773 // Fall through. | |
| 3774 } else if (if_all_clear == if_any_set) { | |
| 3775 B(if_all_clear); | |
| 3776 } else if (if_all_clear == fall_through) { | |
| 3777 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); | |
| 3778 } else if (if_any_set == fall_through) { | |
| 3779 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear); | |
| 3780 } else { | |
| 3781 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); | |
| 3782 B(if_all_clear); | |
| 3783 } | |
| 3784 } | |
| 3785 | |
| 3786 | |
| 3787 void MacroAssembler::CheckFastElements(Register map, | |
| 3788 Register scratch, | |
| 3789 Label* fail) { | |
| 3790 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
| 3791 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
| 3792 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
| 3793 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
| 3794 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
| 3795 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue); | |
| 3796 B(hi, fail); | |
| 3797 } | |
| 3798 | |
| 3799 | |
| 3800 void MacroAssembler::CheckFastObjectElements(Register map, | |
| 3801 Register scratch, | |
| 3802 Label* fail) { | |
| 3803 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
| 3804 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
| 3805 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
| 3806 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
| 3807 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
| 3808 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | |
| 3809 // If cond==ls, set cond=hi, otherwise compare. | |
| 3810 Ccmp(scratch, | |
| 3811 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); | |
| 3812 B(hi, fail); | |
| 3813 } | |
| 3814 | |
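| // The Cmp/Ccmp pair above implements a range check on the elements kind bits. | |
| // Equivalent sketch (illustrative only): | |
| // | |
| //   int kind_bits = map->bit_field2(); | |
| //   if (kind_bits <= Map::kMaximumBitField2FastHoleySmiElementValue) goto fail; | |
| //   if (kind_bits > Map::kMaximumBitField2FastHoleyElementValue) goto fail; | |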
| 3815 | |
| 3816 void MacroAssembler::CheckFastSmiElements(Register map, | |
| 3817 Register scratch, | |
| 3818 Label* fail) { | |
| 3819 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
| 3820 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
| 3821 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
| 3822 Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue); | |
| 3823 B(hi, fail); | |
| 3824 } | |
| 3825 | |
| 3826 | |
| 3827 // Note: The ARM version of this clobbers elements_reg, but this version does | |
| 3828 // not. Some uses of this in A64 assume that elements_reg will be preserved. | |
| 3829 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | |
| 3830 Register key_reg, | |
| 3831 Register elements_reg, | |
| 3832 Register scratch1, | |
| 3833 FPRegister fpscratch1, | |
| 3834 FPRegister fpscratch2, | |
| 3835 Label* fail, | |
| 3836 int elements_offset) { | |
| 3837 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | |
| 3838 Label store_num; | |
| 3839 | |
| 3840 // Speculatively convert the smi to a double - all smis can be exactly | |
| 3841 // represented as a double. | |
| 3842 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); | |
| 3843 | |
| 3844 // If value_reg is a smi, we're done. | |
| 3845 JumpIfSmi(value_reg, &store_num); | |
| 3846 | |
| 3847 // Ensure that the object is a heap number. | |
| 3848 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), | |
| 3849 fail, DONT_DO_SMI_CHECK); | |
| 3850 | |
| 3851 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | |
| 3852 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | |
| 3853 | |
| 3854 // Check for NaN by comparing the number to itself: NaN comparison will | |
| 3855 // report unordered, indicated by the overflow flag being set. | |
| 3856 Fcmp(fpscratch1, fpscratch1); | |
| 3857 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); | |
| 3858 | |
| 3859 // Store the result. | |
| 3860 Bind(&store_num); | |
| 3861 Add(scratch1, elements_reg, | |
| 3862 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); | |
| 3863 Str(fpscratch1, | |
| 3864 FieldMemOperand(scratch1, | |
| 3865 FixedDoubleArray::kHeaderSize - elements_offset)); | |
| 3866 } | |
| 3867 | |
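| // The Fcmp/Fcsel pair above canonicalizes NaNs before the store. A sketch of | |
| // the overall store (illustrative only; the helper names are descriptive, | |
| // not real functions): | |
| // | |
| //   double d = value->IsSmi() ? SmiToDouble(value) : HeapNumberValue(value); | |
| //   if (d != d) d = canonical_not_the_hole_nan;  // Unordered compare sets vs. | |
| //   elements[UntagSmi(key)] = d; | |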
| 3868 | |
| 3869 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | |
| 3870 return has_frame_ || !stub->SometimesSetsUpAFrame(); | |
| 3871 } | |
| 3872 | |
| 3873 | |
| 3874 void MacroAssembler::IndexFromHash(Register hash, Register index) { | |
| 3875 // If the hash field contains an array index, pick it out. The assert checks | |
| 3876 // that the constants for the maximum number of digits for an array index | |
| 3877 // cached in the hash field and the number of bits reserved for it do not | |
| 3878 // conflict. | |
| 3879 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | |
| 3880 (1 << String::kArrayIndexValueBits)); | |
| 3881 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in | |
| 3882 // the low kHashShift bits. | |
| 3883 STATIC_ASSERT(kSmiTag == 0); | |
| 3884 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); | |
| 3885 SmiTag(index, hash); | |
| 3886 } | |
| 3887 | |
| 3888 | |
| 3889 void MacroAssembler::EmitSeqStringSetCharCheck( | |
| 3890 Register string, | |
| 3891 Register index, | |
| 3892 SeqStringSetCharCheckIndexType index_type, | |
| 3893 Register scratch, | |
| 3894 uint32_t encoding_mask) { | |
| 3895 ASSERT(!AreAliased(string, index, scratch)); | |
| 3896 | |
| 3897 if (index_type == kIndexIsSmi) { | |
| 3898 AssertSmi(index); | |
| 3899 } | |
| 3900 | |
| 3901 // Check that string is an object. | |
| 3902 AssertNotSmi(string, kNonObject); | |
| 3903 | |
| 3904 // Check that string has an appropriate map. | |
| 3905 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); | |
| 3906 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
| 3907 | |
| 3908 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask); | |
| 3909 Cmp(scratch, encoding_mask); | |
| 3910 Check(eq, kUnexpectedStringType); | |
| 3911 | |
| 3912 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset)); | |
| 3913 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); | |
| 3914 Check(lt, kIndexIsTooLarge); | |
| 3915 | |
| 3916 ASSERT_EQ(0, Smi::FromInt(0)); | |
| 3917 Cmp(index, 0); | |
| 3918 Check(ge, kIndexIsNegative); | |
| 3919 } | |
| 3920 | |
| 3921 | |
| 3922 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | |
| 3923 Register scratch, | |
| 3924 Label* miss) { | |
| 3925 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function. | |
| 3926 // The ARM version takes two scratch registers, and that should be enough for | |
| 3927 // all of the checks. | |
| 3928 | |
| 3929 Label same_contexts; | |
| 3930 | |
| 3931 ASSERT(!AreAliased(holder_reg, scratch)); | |
| 3932 | |
| 3933 // Load current lexical context from the stack frame. | |
| 3934 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
| 3935 // In debug mode, make sure the lexical context is set. | |
| 3936 #ifdef DEBUG | |
| 3937 Cmp(scratch, 0); | |
| 3938 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | |
| 3939 #endif | |
| 3940 | |
| 3941 // Load the native context of the current context. | |
| 3942 int offset = | |
| 3943 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | |
| 3944 Ldr(scratch, FieldMemOperand(scratch, offset)); | |
| 3945 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | |
| 3946 | |
| 3947 // Check the context is a native context. | |
| 3948 if (emit_debug_code()) { | |
| 3949 // Read the first word and compare to the native_context_map. | |
| 3950 Register temp = Tmp1(); | |
| 3951 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset)); | |
| 3952 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | |
| 3953 Check(eq, kExpectedNativeContext); | |
| 3954 } | |
| 3955 | |
| 3956 // Check if both contexts are the same. | |
| 3957 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | |
| 3958 cmp(scratch, Tmp0()); | |
| 3959 b(&same_contexts, eq); | |
| 3960 | |
| 3961 // Check the context is a native context. | |
| 3962 if (emit_debug_code()) { | |
| 3963 // Move Tmp0() into a different register, as CompareRoot will use it. | |
| 3964 Register temp = Tmp1(); | |
| 3965 mov(temp, Tmp0()); | |
| 3966 CompareRoot(temp, Heap::kNullValueRootIndex); | |
| 3967 Check(ne, kExpectedNonNullContext); | |
| 3968 | |
| 3969 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset)); | |
| 3970 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | |
| 3971 Check(eq, kExpectedNativeContext); | |
| 3972 | |
| 3973 // Assume that Tmp0() has been clobbered by the MacroAssembler, so reload | |
| 3974 // its value. | |
| 3975 ldr(Tmp0(), FieldMemOperand(holder_reg, | |
| 3976 JSGlobalProxy::kNativeContextOffset)); | |
| 3977 } | |
| 3978 | |
| 3979 // Check that the security token in the calling global object is | |
| 3980 // compatible with the security token in the receiving global | |
| 3981 // object. | |
| 3982 int token_offset = Context::kHeaderSize + | |
| 3983 Context::SECURITY_TOKEN_INDEX * kPointerSize; | |
| 3984 | |
| 3985 ldr(scratch, FieldMemOperand(scratch, token_offset)); | |
| 3986 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset)); | |
| 3987 cmp(scratch, Tmp0()); | |
| 3988 b(miss, ne); | |
| 3989 | |
| 3990 bind(&same_contexts); | |
| 3991 } | |
| 3992 | |
| 3993 | |
| 3994 // Compute the hash code from the untagged key. This must be kept in sync with | |
| 3995 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | |
| 3996 // code-stub-hydrogen.cc | |
| 3997 void MacroAssembler::GetNumberHash(Register key, Register scratch) { | |
| 3998 ASSERT(!AreAliased(key, scratch)); | |
| 3999 | |
| 4000 // Xor original key with a seed. | |
| 4001 LoadRoot(scratch, Heap::kHashSeedRootIndex); | |
| 4002 Eor(key, key, Operand::UntagSmi(scratch)); | |
| 4003 | |
| 4004 // The algorithm uses 32-bit integer values. | |
| 4005 key = key.W(); | |
| 4006 scratch = scratch.W(); | |
| 4007 | |
| 4008 // Compute the hash code from the untagged key. This must be kept in sync | |
| 4009 // with ComputeIntegerHash in utils.h. | |
| 4010 // | |
| 4011 // hash = ~hash + (hash << 15); | |
| 4012 Mvn(scratch, key); | |
| 4013 Add(key, scratch, Operand(key, LSL, 15)); | |
| 4014 // hash = hash ^ (hash >> 12); | |
| 4015 Eor(key, key, Operand(key, LSR, 12)); | |
| 4016 // hash = hash + (hash << 2); | |
| 4017 Add(key, key, Operand(key, LSL, 2)); | |
| 4018 // hash = hash ^ (hash >> 4); | |
| 4019 Eor(key, key, Operand(key, LSR, 4)); | |
| 4020 // hash = hash * 2057; | |
| 4021 Mov(scratch, Operand(key, LSL, 11)); | |
| 4022 Add(key, key, Operand(key, LSL, 3)); | |
| 4023 Add(key, key, scratch); | |
| 4024 // hash = hash ^ (hash >> 16); | |
| 4025 Eor(key, key, Operand(key, LSR, 16)); | |
| 4026 } | |
| 4027 | |
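| // A plain C++ sketch of the hash computation above, mirroring | |
| // ComputeIntegerHash (illustrative only; 32-bit unsigned arithmetic): | |
| // | |
| //   uint32_t hash = key ^ seed; | |
| //   hash = ~hash + (hash << 15);  // Mvn; Add with LSL #15. | |
| //   hash = hash ^ (hash >> 12); | |
| //   hash = hash + (hash << 2); | |
| //   hash = hash ^ (hash >> 4); | |
| //   hash = hash * 2057;           // LSL #11 and LSL #3 folded into the Adds. | |
| //   hash = hash ^ (hash >> 16); | |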
| 4028 | |
| 4029 void MacroAssembler::LoadFromNumberDictionary(Label* miss, | |
| 4030 Register elements, | |
| 4031 Register key, | |
| 4032 Register result, | |
| 4033 Register scratch0, | |
| 4034 Register scratch1, | |
| 4035 Register scratch2, | |
| 4036 Register scratch3) { | |
| 4037 ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); | |
| 4038 | |
| 4039 Label done; | |
| 4040 | |
| 4041 SmiUntag(scratch0, key); | |
| 4042 GetNumberHash(scratch0, scratch1); | |
| 4043 | |
| 4044 // Compute the capacity mask. | |
| 4045 Ldrsw(scratch1, | |
| 4046 UntagSmiFieldMemOperand(elements, | |
| 4047 SeededNumberDictionary::kCapacityOffset)); | |
| 4048 Sub(scratch1, scratch1, 1); | |
| 4049 | |
| 4050 // Generate an unrolled loop that performs a few probes before giving up. | |
| 4051 for (int i = 0; i < kNumberDictionaryProbes; i++) { | |
| 4052 // Compute the masked index: (hash + i + i * i) & mask. | |
| 4053 if (i > 0) { | |
| 4054 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i)); | |
| 4055 } else { | |
| 4056 Mov(scratch2, scratch0); | |
| 4057 } | |
| 4058 And(scratch2, scratch2, scratch1); | |
| 4059 | |
| 4060 // Scale the index by multiplying by the element size. | |
| 4061 ASSERT(SeededNumberDictionary::kEntrySize == 3); | |
| 4062 Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | |
| 4063 | |
| 4064 // Check if the key is identical to the name. | |
| 4065 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); | |
| 4066 Ldr(scratch3, | |
| 4067 FieldMemOperand(scratch2, | |
| 4068 SeededNumberDictionary::kElementsStartOffset)); | |
| 4069 Cmp(key, scratch3); | |
| 4070 if (i != (kNumberDictionaryProbes - 1)) { | |
| 4071 B(eq, &done); | |
| 4072 } else { | |
| 4073 B(ne, miss); | |
| 4074 } | |
| 4075 } | |
| 4076 | |
| 4077 Bind(&done); | |
| 4078 // Check that the value is a normal property. | |
| 4079 const int kDetailsOffset = | |
| 4080 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | |
| 4081 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset)); | |
| 4082 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss); | |
| 4083 | |
| 4084 // Get the value at the masked, scaled index and return. | |
| 4085 const int kValueOffset = | |
| 4086 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | |
| 4087 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); | |
| 4088 } | |
| 4089 | |
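| // The unrolled loop above performs the standard dictionary probe sequence. | |
| // Sketch (illustrative only; probe_offset(i) stands for | |
| // SeededNumberDictionary::GetProbeOffset(i), and probe_offset(0) == 0): | |
| // | |
| //   for (int i = 0; i < kNumberDictionaryProbes; i++) { | |
| //     int index = (hash + probe_offset(i)) & capacity_mask; | |
| //     int entry = index * 3;  // kEntrySize == 3. | |
| //     if (elements_start[entry] == key) goto done;  // Last probe jumps to miss. | |
| //   } | |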
| 4090 | |
| 4091 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | |
| 4092 Register address, | |
| 4093 Register scratch, | |
| 4094 SaveFPRegsMode fp_mode, | |
| 4095 RememberedSetFinalAction and_then) { | |
| 4096 ASSERT(!AreAliased(object, address, scratch)); | |
| 4097 Label done, store_buffer_overflow; | |
| 4098 if (emit_debug_code()) { | |
| 4099 Label ok; | |
| 4100 JumpIfNotInNewSpace(object, &ok); | |
| 4101 Abort(kRememberedSetPointerInNewSpace); | |
| 4102 bind(&ok); | |
| 4103 } | |
| 4104 // Load store buffer top. | |
| 4105 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate()))); | |
| 4106 Ldr(scratch, MemOperand(Tmp0())); | |
| 4107 // Store pointer to buffer and increment buffer top. | |
| 4108 Str(address, MemOperand(scratch, kPointerSize, PostIndex)); | |
| 4109 // Write back new top of buffer. | |
| 4110 Str(scratch, MemOperand(Tmp0())); | |
| 4111 // Call stub on end of buffer. | |
| 4112 // Check for end of buffer. | |
| 4113 ASSERT(StoreBuffer::kStoreBufferOverflowBit == | |
| 4114 (1 << (14 + kPointerSizeLog2))); | |
| 4115 if (and_then == kFallThroughAtEnd) { | |
| 4116 Tbz(scratch, (14 + kPointerSizeLog2), &done); | |
| 4117 } else { | |
| 4118 ASSERT(and_then == kReturnAtEnd); | |
| 4119 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow); | |
| 4120 Ret(); | |
| 4121 } | |
| 4122 | |
| 4123 Bind(&store_buffer_overflow); | |
| 4124 Push(lr); | |
| 4125 StoreBufferOverflowStub store_buffer_overflow_stub = | |
| 4126 StoreBufferOverflowStub(fp_mode); | |
| 4127 CallStub(&store_buffer_overflow_stub); | |
| 4128 Pop(lr); | |
| 4129 | |
| 4130 Bind(&done); | |
| 4131 if (and_then == kReturnAtEnd) { | |
| 4132 Ret(); | |
| 4133 } | |
| 4134 } | |
| 4135 | |
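| // The store buffer update above is, in effect (illustrative only; | |
| // 'store_buffer_top' stands for the slot behind the external reference): | |
| // | |
| //   *store_buffer_top++ = address;  // Str with post-index writeback. | |
| //   if (reinterpret_cast<uintptr_t>(store_buffer_top) & | |
| //       StoreBuffer::kStoreBufferOverflowBit) { | |
| //     // Tbnz on bit (14 + kPointerSizeLog2): flush via StoreBufferOverflowStub. | |
| //   } | |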
| 4136 | |
| 4137 void MacroAssembler::PopSafepointRegisters() { | |
| 4138 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
| 4139 PopXRegList(kSafepointSavedRegisters); | |
| 4140 Drop(num_unsaved); | |
| 4141 } | |
| 4142 | |
| 4143 | |
| 4144 void MacroAssembler::PushSafepointRegisters() { | |
| 4145 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so | |
| 4146 // adjust the stack for unsaved registers. | |
| 4147 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
| 4148 ASSERT(num_unsaved >= 0); | |
| 4149 Claim(num_unsaved); | |
| 4150 PushXRegList(kSafepointSavedRegisters); | |
| 4151 } | |
| 4152 | |
| 4153 | |
| 4154 void MacroAssembler::PushSafepointFPRegisters() { | |
| 4155 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | |
| 4156 FPRegister::kAllocatableFPRegisters)); | |
| 4157 } | |
| 4158 | |
| 4159 | |
| 4160 void MacroAssembler::PopSafepointFPRegisters() { | |
| 4161 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | |
| 4162 FPRegister::kAllocatableFPRegisters)); | |
| 4163 } | |
| 4164 | |
| 4165 | |
| 4166 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | |
| 4167 // Make sure the safepoint registers list is what we expect. | |
| 4168 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); | |
| 4169 | |
| 4170 // Safepoint registers are stored contiguously on the stack, but not all the | |
| 4171 // registers are saved. The following registers are excluded: | |
| 4172 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of | |
| 4173 // the macro assembler. | |
| 4174 // - x28 (jssp) because JS stack pointer doesn't need to be included in | |
| 4175 // safepoint registers. | |
| 4176 // - x31 (csp) because the system stack pointer doesn't need to be included | |
| 4177 // in safepoint registers. | |
| 4178 // | |
| 4179 // This function implements the mapping of register code to index into the | |
| 4180 // safepoint register slots. | |
| 4181 if ((reg_code >= 0) && (reg_code <= 15)) { | |
| 4182 return reg_code; | |
| 4183 } else if ((reg_code >= 18) && (reg_code <= 27)) { | |
| 4184 // Skip ip0 and ip1. | |
| 4185 return reg_code - 2; | |
| 4186 } else if ((reg_code == 29) || (reg_code == 30)) { | |
| 4187 // Also skip jssp. | |
| 4188 return reg_code - 3; | |
| 4189 } else { | |
| 4190 // This register has no safepoint register slot. | |
| 4191 UNREACHABLE(); | |
| 4192 return -1; | |
| 4193 } | |
| 4194 } | |
| 4195 | |
| 4196 | |
| 4197 void MacroAssembler::CheckPageFlagSet(const Register& object, | |
| 4198 const Register& scratch, | |
| 4199 int mask, | |
| 4200 Label* if_any_set) { | |
| 4201 And(scratch, object, ~Page::kPageAlignmentMask); | |
| 4202 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | |
| 4203 TestAndBranchIfAnySet(scratch, mask, if_any_set); | |
| 4204 } | |
| 4205 | |
| 4206 | |
| 4207 void MacroAssembler::CheckPageFlagClear(const Register& object, | |
| 4208 const Register& scratch, | |
| 4209 int mask, | |
| 4210 Label* if_all_clear) { | |
| 4211 And(scratch, object, ~Page::kPageAlignmentMask); | |
| 4212 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | |
| 4213 TestAndBranchIfAllClear(scratch, mask, if_all_clear); | |
| 4214 } | |
| 4215 | |
| 4216 | |
| 4217 void MacroAssembler::RecordWriteField( | |
| 4218 Register object, | |
| 4219 int offset, | |
| 4220 Register value, | |
| 4221 Register scratch, | |
| 4222 LinkRegisterStatus lr_status, | |
| 4223 SaveFPRegsMode save_fp, | |
| 4224 RememberedSetAction remembered_set_action, | |
| 4225 SmiCheck smi_check) { | |
| 4226 // First, check if a write barrier is even needed. The tests below | |
| 4227 // catch stores of Smis. | |
| 4228 Label done; | |
| 4229 | |
| 4230 // Skip the barrier if writing a smi. | |
| 4231 if (smi_check == INLINE_SMI_CHECK) { | |
| 4232 JumpIfSmi(value, &done); | |
| 4233 } | |
| 4234 | |
| 4235 // Although the object register is tagged, the offset is relative to the start | |
| 4236 // of the object, so offset must be a multiple of kPointerSize. | |
| 4237 ASSERT(IsAligned(offset, kPointerSize)); | |
| 4238 | |
| 4239 Add(scratch, object, offset - kHeapObjectTag); | |
| 4240 if (emit_debug_code()) { | |
| 4241 Label ok; | |
| 4242 Tst(scratch, (1 << kPointerSizeLog2) - 1); | |
| 4243 B(eq, &ok); | |
| 4244 Abort(kUnalignedCellInWriteBarrier); | |
| 4245 Bind(&ok); | |
| 4246 } | |
| 4247 | |
| 4248 RecordWrite(object, | |
| 4249 scratch, | |
| 4250 value, | |
| 4251 lr_status, | |
| 4252 save_fp, | |
| 4253 remembered_set_action, | |
| 4254 OMIT_SMI_CHECK); | |
| 4255 | |
| 4256 Bind(&done); | |
| 4257 | |
| 4258 // Clobber clobbered input registers when running with the debug-code flag | |
| 4259 // turned on to provoke errors. | |
| 4260 if (emit_debug_code()) { | |
| 4261 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); | |
| 4262 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); | |
| 4263 } | |
| 4264 } | |
| 4265 | |
| 4266 | |
| 4267 // Will clobber: object, address, value, Tmp0(), Tmp1(). | |
| 4268 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. | |
| 4269 // | |
| 4270 // The register 'object' contains a heap object pointer. The heap object tag is | |
| 4271 // shifted away. | |
| 4272 void MacroAssembler::RecordWrite(Register object, | |
| 4273 Register address, | |
| 4274 Register value, | |
| 4275 LinkRegisterStatus lr_status, | |
| 4276 SaveFPRegsMode fp_mode, | |
| 4277 RememberedSetAction remembered_set_action, | |
| 4278 SmiCheck smi_check) { | |
| 4279 ASM_LOCATION("MacroAssembler::RecordWrite"); | |
| 4280 ASSERT(!AreAliased(object, value)); | |
| 4281 | |
| 4282 if (emit_debug_code()) { | |
| 4283 Ldr(Tmp0(), MemOperand(address)); | |
| 4284 Cmp(Tmp0(), value); | |
| 4285 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | |
| 4286 } | |
| 4287 | |
| 4288 // Count number of write barriers in generated code. | |
| 4289 isolate()->counters()->write_barriers_static()->Increment(); | |
| 4290 // TODO(mstarzinger): Dynamic counter missing. | |
| 4291 | |
| 4292 // First, check if a write barrier is even needed. The tests below | |
| 4293 // catch stores of smis and stores into the young generation. | |
| 4294 Label done; | |
| 4295 | |
| 4296 if (smi_check == INLINE_SMI_CHECK) { | |
| 4297 ASSERT_EQ(0, kSmiTag); | |
| 4298 JumpIfSmi(value, &done); | |
| 4299 } | |
| 4300 | |
| 4301 CheckPageFlagClear(value, | |
| 4302 value, // Used as scratch. | |
| 4303 MemoryChunk::kPointersToHereAreInterestingMask, | |
| 4304 &done); | |
| 4305 CheckPageFlagClear(object, | |
| 4306 value, // Used as scratch. | |
| 4307 MemoryChunk::kPointersFromHereAreInterestingMask, | |
| 4308 &done); | |
| 4309 | |
| 4310 // Record the actual write. | |
| 4311 if (lr_status == kLRHasNotBeenSaved) { | |
| 4312 Push(lr); | |
| 4313 } | |
| 4314 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); | |
| 4315 CallStub(&stub); | |
| 4316 if (lr_status == kLRHasNotBeenSaved) { | |
| 4317 Pop(lr); | |
| 4318 } | |
| 4319 | |
| 4320 Bind(&done); | |
| 4321 | |
| 4322 // Clobber clobbered registers when running with the debug-code flag | |
| 4323 // turned on to provoke errors. | |
| 4324 if (emit_debug_code()) { | |
| 4325 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12))); | |
| 4326 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16))); | |
| 4327 } | |
| 4328 } | |
| 4329 | |
| 4330 | |
| 4331 void MacroAssembler::AssertHasValidColor(const Register& reg) { | |
| 4332 if (emit_debug_code()) { | |
| 4333 // The bit sequence is backward. The first character in the string | |
| 4334 // represents the least significant bit. | |
| 4335 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | |
| 4336 | |
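    // Only the combination (bit 0 clear, bit 1 set) is impossible; the two
    // branches below accept every other pattern.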
| 4337 Label color_is_valid; | |
| 4338 Tbnz(reg, 0, &color_is_valid); | |
| 4339 Tbz(reg, 1, &color_is_valid); | |
| 4340 Abort(kUnexpectedColorFound); | |
| 4341 Bind(&color_is_valid); | |
| 4342 } | |
| 4343 } | |
| 4344 | |
| 4345 | |
| 4346 void MacroAssembler::GetMarkBits(Register addr_reg, | |
| 4347 Register bitmap_reg, | |
| 4348 Register shift_reg) { | |
| 4349 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg)); | |
| 4350 // addr_reg is divided into fields: | |
| 4351 // |63 page base 20|19 high 8|7 shift 3|2 0| | |
| 4352 // 'high' gives the index of the cell holding color bits for the object. | |
| 4353 // 'shift' gives the offset in the cell for this object's color. | |
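  // For example, assuming kPointerSizeLog2 == 3 and Bitmap::kBitsPerCellLog2
  // == 5 (32-bit cells), kShiftBits is 8, so 'high' is addr_reg[19:8] (the
  // cell index) and 'shift' is addr_reg[7:3] (the bit offset within the cell).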
| 4354 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | |
| 4355 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | |
| 4356 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); | |
| 4357 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2)); | |
| 4358 // bitmap_reg: | |
| 4359 // |63 page base 20|19 zeros 15|14 high 3|2 0| | |
| 4360 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | |
| 4361 } | |
| 4362 | |
| 4363 | |
| 4364 void MacroAssembler::HasColor(Register object, | |
| 4365 Register bitmap_scratch, | |
| 4366 Register shift_scratch, | |
| 4367 Label* has_color, | |
| 4368 int first_bit, | |
| 4369 int second_bit) { | |
| 4370 // See mark-compact.h for color definitions. | |
| 4371 ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch)); | |
| 4372 | |
| 4373 GetMarkBits(object, bitmap_scratch, shift_scratch); | |
| 4374 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | |
| 4375 // Shift the bitmap down to get the color of the object in bits [1:0]. | |
| 4376 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch); | |
| 4377 | |
| 4378 AssertHasValidColor(bitmap_scratch); | |
| 4379 | |
| 4380 // These bit sequences are backwards. The first character in the string | |
| 4381 // represents the least significant bit. | |
| 4382 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | |
| 4383 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
| 4384 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | |
| 4385 | |
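  // For example, JumpIfBlack calls this with (first_bit, second_bit) == (1, 0),
  // so the code below branches when bit 0 is set and bit 1 is clear, i.e. the
  // black pattern "10".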
| 4386 // Check for the color. | |
| 4387 if (first_bit == 0) { | |
| 4388 // Checking for white. | |
| 4389 ASSERT(second_bit == 0); | |
| 4390 // We only need to test the first bit. | |
| 4391 Tbz(bitmap_scratch, 0, has_color); | |
| 4392 } else { | |
| 4393 Label other_color; | |
| 4394 // Checking for grey or black. | |
| 4395 Tbz(bitmap_scratch, 0, &other_color); | |
| 4396 if (second_bit == 0) { | |
| 4397 Tbz(bitmap_scratch, 1, has_color); | |
| 4398 } else { | |
| 4399 Tbnz(bitmap_scratch, 1, has_color); | |
| 4400 } | |
| 4401 Bind(&other_color); | |
| 4402 } | |
| 4403 | |
| 4404 // Fall through if it does not have the right color. | |
| 4405 } | |
| 4406 | |
| 4407 | |
| 4408 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, | |
| 4409 Register scratch, | |
| 4410 Label* if_deprecated) { | |
| 4411 if (map->CanBeDeprecated()) { | |
| 4412 Mov(scratch, Operand(map)); | |
| 4413 Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset)); | |
| 4414 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated); | |
| 4415 } | |
| 4416 } | |
| 4417 | |
| 4418 | |
| 4419 void MacroAssembler::JumpIfBlack(Register object, | |
| 4420 Register scratch0, | |
| 4421 Register scratch1, | |
| 4422 Label* on_black) { | |
| 4423 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
| 4424 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | |
| 4425 } | |
| 4426 | |
| 4427 | |
| 4428 void MacroAssembler::JumpIfDictionaryInPrototypeChain( | |
| 4429 Register object, | |
| 4430 Register scratch0, | |
| 4431 Register scratch1, | |
| 4432 Label* found) { | |
| 4433 ASSERT(!AreAliased(object, scratch0, scratch1)); | |
| 4434 Factory* factory = isolate()->factory(); | |
| 4435 Register current = scratch0; | |
| 4436 Label loop_again; | |
| 4437 | |
| 4438 // Start at the object itself; the loop below walks up its prototype chain. | |
| 4439 Mov(current, object); | |
| 4440 | |
| 4441 // Loop based on the map going up the prototype chain. | |
| 4442 Bind(&loop_again); | |
| 4443 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); | |
| 4444 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); | |
| 4445 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount); | |
| 4446 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found); | |
| 4447 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); | |
| 4448 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again); | |
| 4449 } | |
| 4450 | |
| 4451 | |
| 4452 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, | |
| 4453 Register result) { | |
| 4454 ASSERT(!result.Is(ldr_location)); | |
| 4455 const uint32_t kLdrLitOffset_lsb = 5; | |
| 4456 const uint32_t kLdrLitOffset_width = 19; | |
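  // An ldr-literal instruction encodes a signed 19-bit word offset in bits
  // [23:5]. Assuming kWordSizeInBytesLog2 == 2, an offset field of 2, for
  // example, places the literal 8 bytes after the instruction.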
| 4457 Ldr(result, MemOperand(ldr_location)); | |
| 4458 if (emit_debug_code()) { | |
| 4459 And(result, result, LoadLiteralFMask); | |
| 4460 Cmp(result, LoadLiteralFixed); | |
| 4461 Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral); | |
| 4462 // The instruction was clobbered. Reload it. | |
| 4463 Ldr(result, MemOperand(ldr_location)); | |
| 4464 } | |
| 4465 Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width); | |
| 4466 Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2)); | |
| 4467 } | |
| 4468 | |
| 4469 | |
| 4470 void MacroAssembler::EnsureNotWhite( | |
| 4471 Register value, | |
| 4472 Register bitmap_scratch, | |
| 4473 Register shift_scratch, | |
| 4474 Register load_scratch, | |
| 4475 Register length_scratch, | |
| 4476 Label* value_is_white_and_not_data) { | |
| 4477 ASSERT(!AreAliased( | |
| 4478 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch)); | |
| 4479 | |
| 4480 // These bit sequences are backwards. The first character in the string | |
| 4481 // represents the least significant bit. | |
| 4482 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | |
| 4483 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
| 4484 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | |
| 4485 | |
| 4486 GetMarkBits(value, bitmap_scratch, shift_scratch); | |
| 4487 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | |
| 4488 Lsr(load_scratch, load_scratch, shift_scratch); | |
| 4489 | |
| 4490 AssertHasValidColor(load_scratch); | |
| 4491 | |
| 4492 // If the value is black or grey we don't need to do anything. | |
| 4493 // Since both black and grey have a 1 in the first position and white does | |
| 4494 // not have a 1 there we only need to check one bit. | |
| 4495 Label done; | |
| 4496 Tbnz(load_scratch, 0, &done); | |
| 4497 | |
| 4498 // Value is white. We check whether it is data that doesn't need scanning. | |
| 4499 Register map = load_scratch; // Holds map while checking type. | |
| 4500 Label is_data_object; | |
| 4501 | |
| 4502 // Check for heap-number. | |
| 4503 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | |
| 4504 Mov(length_scratch, HeapNumber::kSize); | |
| 4505 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object); | |
| 4506 | |
| 4507 // Check for strings. | |
| 4508 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | |
| 4509 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | |
| 4510 // If it's a string and it's not an indirect (cons or sliced) string then | |
| 4511 // it's an object containing no GC pointers. | |
| 4512 Register instance_type = load_scratch; | |
| 4513 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
| 4514 TestAndBranchIfAnySet(instance_type, | |
| 4515 kIsIndirectStringMask | kIsNotStringMask, | |
| 4516 value_is_white_and_not_data); | |
| 4517 | |
| 4518 // It's a non-indirect (non-cons and non-slice) string. | |
| 4519 // If it's external, the length is just ExternalString::kSize. | |
| 4520 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | |
| 4521 // External strings are the only ones with the kExternalStringTag bit | |
| 4522 // set. | |
| 4523 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | |
| 4524 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | |
| 4525 Mov(length_scratch, ExternalString::kSize); | |
| 4526 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object); | |
| 4527 | |
| 4528 // Sequential string, either ASCII or UC16. | |
| 4529 // The length is loaded Smi-untagged below. For ASCII (char-size of 1) it is | |
| 4530 // already the byte length; for UC16 (char-size of 2) it is shifted left by | |
| 4531 // one to double it. | |
| 4532 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); | |
| 4533 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value, | |
| 4534 String::kLengthOffset)); | |
| 4535 Tst(instance_type, kStringEncodingMask); | |
| 4536 Cset(load_scratch, eq); | |
| 4537 Lsl(length_scratch, length_scratch, load_scratch); | |
| 4538 Add(length_scratch, | |
| 4539 length_scratch, | |
| 4540 SeqString::kHeaderSize + kObjectAlignmentMask); | |
| 4541 Bic(length_scratch, length_scratch, kObjectAlignmentMask); | |
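  // The Add/Bic pair above is the usual align-up idiom: adding
  // kObjectAlignmentMask and then clearing those bits rounds the size up to
  // the next kObjectAlignment boundary.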
| 4542 | |
| 4543 Bind(&is_data_object); | |
| 4544 // Value is a data object, and it is white. Mark it black. Since we know | |
| 4545 // that the object is white we can make it black by flipping one bit. | |
| 4546 Register mask = shift_scratch; | |
| 4547 Mov(load_scratch, 1); | |
| 4548 Lsl(mask, load_scratch, shift_scratch); | |
| 4549 | |
| 4550 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | |
| 4551 Orr(load_scratch, load_scratch, mask); | |
| 4552 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | |
| 4553 | |
| 4554 Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask); | |
| 4555 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | |
| 4556 Add(load_scratch, load_scratch, length_scratch); | |
| 4557 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | |
| 4558 | |
| 4559 Bind(&done); | |
| 4560 } | |
| 4561 | |
| 4562 | |
| 4563 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { | |
| 4564 if (emit_debug_code()) { | |
| 4565 Check(cond, reason); | |
| 4566 } | |
| 4567 } | |
| 4568 | |
| 4569 | |
| 4571 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { | |
| 4572 if (emit_debug_code()) { | |
| 4573 CheckRegisterIsClear(reg, reason); | |
| 4574 } | |
| 4575 } | |
| 4576 | |
| 4577 | |
| 4578 void MacroAssembler::AssertRegisterIsRoot(Register reg, | |
| 4579 Heap::RootListIndex index, | |
| 4580 BailoutReason reason) { | |
| 4581 // CompareRoot uses Tmp0(). | |
| 4582 ASSERT(!reg.Is(Tmp0())); | |
| 4583 if (emit_debug_code()) { | |
| 4584 CompareRoot(reg, index); | |
| 4585 Check(eq, reason); | |
| 4586 } | |
| 4587 } | |
| 4588 | |
| 4589 | |
| 4590 void MacroAssembler::AssertFastElements(Register elements) { | |
| 4591 if (emit_debug_code()) { | |
| 4592 Register temp = Tmp1(); | |
| 4593 Label ok; | |
| 4594 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); | |
| 4595 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); | |
| 4596 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); | |
| 4597 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); | |
| 4598 Abort(kJSObjectWithFastElementsMapHasSlowElements); | |
| 4599 Bind(&ok); | |
| 4600 } | |
| 4601 } | |
| 4602 | |
| 4603 | |
| 4604 void MacroAssembler::AssertIsString(const Register& object) { | |
| 4605 if (emit_debug_code()) { | |
| 4606 Register temp = Tmp1(); | |
| 4607 STATIC_ASSERT(kSmiTag == 0); | |
| 4608 Tst(object, Operand(kSmiTagMask)); | |
| 4609 Check(ne, kOperandIsNotAString); | |
| 4610 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 4611 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | |
| 4612 Check(lo, kOperandIsNotAString); | |
| 4613 } | |
| 4614 } | |
| 4615 | |
| 4616 | |
| 4617 void MacroAssembler::Check(Condition cond, BailoutReason reason) { | |
| 4618 Label ok; | |
| 4619 B(cond, &ok); | |
| 4620 Abort(reason); | |
| 4621 // Will not return here. | |
| 4622 Bind(&ok); | |
| 4623 } | |
| 4624 | |
| 4625 | |
| 4626 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) { | |
| 4627 Label ok; | |
| 4628 Cbz(reg, &ok); | |
| 4629 Abort(reason); | |
| 4630 // Will not return here. | |
| 4631 Bind(&ok); | |
| 4632 } | |
| 4633 | |
| 4634 | |
| 4635 void MacroAssembler::Abort(BailoutReason reason) { | |
| 4636 #ifdef DEBUG | |
| 4637 RecordComment("Abort message: "); | |
| 4638 RecordComment(GetBailoutReason(reason)); | |
| 4639 | |
| 4640 if (FLAG_trap_on_abort) { | |
| 4641 Brk(0); | |
| 4642 return; | |
| 4643 } | |
| 4644 #endif | |
| 4645 | |
| 4646 // Abort is used in some contexts where csp is the stack pointer. In order to | |
| 4647 // simplify the CallRuntime code, make sure that jssp is the stack pointer. | |
| 4648 // There is no risk of register corruption here because Abort doesn't return. | |
| 4649 Register old_stack_pointer = StackPointer(); | |
| 4650 SetStackPointer(jssp); | |
| 4651 Mov(jssp, old_stack_pointer); | |
| 4652 | |
| 4653 if (use_real_aborts()) { | |
| 4654 // Avoid infinite recursion; Push contains some assertions that use Abort. | |
| 4655 NoUseRealAbortsScope no_real_aborts(this); | |
| 4656 | |
| 4657 Mov(x0, Operand(Smi::FromInt(reason))); | |
| 4658 Push(x0); | |
| 4659 | |
| 4660 if (!has_frame_) { | |
| 4661 // We don't actually want to generate a pile of code for this, so just | |
| 4662 // claim there is a stack frame, without generating one. | |
| 4663 FrameScope scope(this, StackFrame::NONE); | |
| 4664 CallRuntime(Runtime::kAbort, 1); | |
| 4665 } else { | |
| 4666 CallRuntime(Runtime::kAbort, 1); | |
| 4667 } | |
| 4668 } else { | |
| 4669 // Load the string to pass to Printf. | |
| 4670 Label msg_address; | |
| 4671 Adr(x0, &msg_address); | |
| 4672 | |
| 4673 // Call Printf directly to report the error. | |
| 4674 CallPrintf(); | |
| 4675 | |
| 4676 // We need a way to stop execution on both the simulator and real hardware, | |
| 4677 // and Unreachable() is the best option. | |
| 4678 Unreachable(); | |
| 4679 | |
| 4680 // Emit the message string directly in the instruction stream. | |
| 4681 { | |
| 4682 BlockConstPoolScope scope(this); | |
| 4683 Bind(&msg_address); | |
| 4684 EmitStringData(GetBailoutReason(reason)); | |
| 4685 } | |
| 4686 } | |
| 4687 | |
| 4688 SetStackPointer(old_stack_pointer); | |
| 4689 } | |
| 4690 | |
| 4691 | |
| 4692 void MacroAssembler::LoadTransitionedArrayMapConditional( | |
| 4693 ElementsKind expected_kind, | |
| 4694 ElementsKind transitioned_kind, | |
| 4695 Register map_in_out, | |
| 4696 Register scratch, | |
| 4697 Label* no_map_match) { | |
| 4698 // Load the global or builtins object from the current context. | |
| 4699 Ldr(scratch, GlobalObjectMemOperand()); | |
| 4700 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | |
| 4701 | |
| 4702 // Check that the function's map is the same as the expected cached map. | |
| 4703 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX)); | |
| 4704 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | |
| 4705 Ldr(Tmp0(), FieldMemOperand(scratch, offset)); | |
| 4706 Cmp(map_in_out, Tmp0()); | |
| 4707 B(ne, no_map_match); | |
| 4708 | |
| 4709 // Use the transitioned cached map. | |
| 4710 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | |
| 4711 Ldr(map_in_out, FieldMemOperand(scratch, offset)); | |
| 4712 } | |
| 4713 | |
| 4714 | |
| 4715 void MacroAssembler::LoadArrayFunction(Register function) { | |
| 4716 // Load the global or builtins object from the current context. | |
| 4717 Ldr(function, GlobalObjectMemOperand()); | |
| 4718 // Load the global context from the global or builtins object. | |
| 4719 Ldr(function, | |
| 4720 FieldMemOperand(function, GlobalObject::kGlobalContextOffset)); | |
| 4721 // Load the array function from the native context. | |
| 4722 Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX)); | |
| 4723 } | |
| 4724 | |
| 4725 | |
| 4726 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | |
| 4727 // Load the global or builtins object from the current context. | |
| 4728 Ldr(function, GlobalObjectMemOperand()); | |
| 4729 // Load the native context from the global or builtins object. | |
| 4730 Ldr(function, FieldMemOperand(function, | |
| 4731 GlobalObject::kNativeContextOffset)); | |
| 4732 // Load the function from the native context. | |
| 4733 Ldr(function, ContextMemOperand(function, index)); | |
| 4734 } | |
| 4735 | |
| 4736 | |
| 4737 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | |
| 4738 Register map, | |
| 4739 Register scratch) { | |
| 4740 // Load the initial map. The global functions all have initial maps. | |
| 4741 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | |
| 4742 if (emit_debug_code()) { | |
| 4743 Label ok, fail; | |
| 4744 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); | |
| 4745 B(&ok); | |
| 4746 Bind(&fail); | |
| 4747 Abort(kGlobalFunctionsMustHaveInitialMap); | |
| 4748 Bind(&ok); | |
| 4749 } | |
| 4750 } | |
| 4751 | |
| 4752 | |
| 4753 // This is the main Printf implementation. All other Printf variants call | |
| 4754 // PrintfNoPreserve after saving any registers that need to be preserved. | |
| 4755 void MacroAssembler::PrintfNoPreserve(const char * format, | |
| 4756 const CPURegister& arg0, | |
| 4757 const CPURegister& arg1, | |
| 4758 const CPURegister& arg2, | |
| 4759 const CPURegister& arg3) { | |
| 4760 // We cannot handle a caller-saved stack pointer. It doesn't make much sense | |
| 4761 // in most cases anyway, so this restriction shouldn't be too serious. | |
| 4762 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); | |
| 4763 | |
| 4764 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro | |
| 4765 // assembler. We cannot print the stack pointer because it is typically used | |
| 4766 // to preserve caller-saved registers (using other Printf variants which | |
| 4767 // depend on this helper). | |
| 4768 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0)); | |
| 4769 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1)); | |
| 4770 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2)); | |
| 4771 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3)); | |
| 4772 | |
| 4773 static const int kMaxArgCount = 4; | |
| 4774 // Assume that we have the maximum number of arguments until we know | |
| 4775 // otherwise. | |
| 4776 int arg_count = kMaxArgCount; | |
| 4777 | |
| 4778 // The provided arguments. | |
| 4779 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; | |
| 4780 | |
| 4781 // The PCS registers where the arguments need to end up. | |
| 4782 CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg}; | |
| 4783 | |
| 4784 // Promote FP arguments to doubles, and integer arguments to X registers. | |
| 4785 // Note that FP and integer arguments cannot be mixed, but we'll check | |
| 4786 // AreSameSizeAndType once we've processed these promotions. | |
| 4787 for (int i = 0; i < kMaxArgCount; i++) { | |
| 4788 if (args[i].IsRegister()) { | |
| 4789 // Note that we use x1 onwards, because x0 will hold the format string. | |
| 4790 pcs[i] = Register::XRegFromCode(i + 1); | |
| 4791 // For simplicity, we handle all integer arguments as X registers. An X | |
| 4792 // register argument takes the same space as a W register argument in the | |
| 4793 // PCS anyway. The only limitation is that we must explicitly clear the | |
| 4794 // top word for W register arguments as the callee will expect it to be | |
| 4795 // clear. | |
| 4796 if (!args[i].Is64Bits()) { | |
| 4797 const Register& as_x = args[i].X(); | |
| 4798 And(as_x, as_x, 0x00000000ffffffff); | |
| 4799 args[i] = as_x; | |
| 4800 } | |
| 4801 } else if (args[i].IsFPRegister()) { | |
| 4802 pcs[i] = FPRegister::DRegFromCode(i); | |
| 4803 // C and C++ varargs functions (such as printf) implicitly promote float | |
| 4804 // arguments to doubles. | |
| 4805 if (!args[i].Is64Bits()) { | |
| 4806 FPRegister s(args[i]); | |
| 4807 const FPRegister& as_d = args[i].D(); | |
| 4808 Fcvt(as_d, s); | |
| 4809 args[i] = as_d; | |
| 4810 } | |
| 4811 } else { | |
| 4812 // This is the first empty (NoCPUReg) argument, so use it to set the | |
| 4813 // argument count and bail out. | |
| 4814 arg_count = i; | |
| 4815 break; | |
| 4816 } | |
| 4817 } | |
| 4818 ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount)); | |
| 4819 // Check that every remaining argument is NoCPUReg. | |
| 4820 for (int i = arg_count; i < kMaxArgCount; i++) { | |
| 4821 ASSERT(args[i].IsNone()); | |
| 4822 } | |
| 4823 ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1], | |
| 4824 args[2], args[3], | |
| 4825 pcs[0], pcs[1], | |
| 4826 pcs[2], pcs[3])); | |
| 4827 | |
| 4828 // Move the arguments into the appropriate PCS registers. | |
| 4829 // | |
| 4830 // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is | |
| 4831 // surprisingly complicated. | |
| 4832 // | |
| 4833 // * For even numbers of registers, we push the arguments and then pop them | |
| 4834 // into their final registers. This maintains 16-byte stack alignment in | |
| 4835 // case csp is the stack pointer, since we're only handling X or D | |
| 4836 // registers at this point. | |
| 4837 // | |
| 4838 // * For odd numbers of registers, we push and pop all but one register in | |
| 4839 // the same way, but the left-over register is moved directly, since we | |
| 4840 // can always safely move one register without clobbering any source. | |
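  // For example, with three X-register arguments, args[0] and args[1] travel
  // via the stack into pcs[0] (x1) and pcs[1] (x2), while the odd one out,
  // args[2], is moved directly into pcs[2] (x3).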
| 4841 if (arg_count >= 4) { | |
| 4842 Push(args[3], args[2], args[1], args[0]); | |
| 4843 } else if (arg_count >= 2) { | |
| 4844 Push(args[1], args[0]); | |
| 4845 } | |
| 4846 | |
| 4847 if ((arg_count % 2) != 0) { | |
| 4848 // Move the left-over register directly. | |
| 4849 const CPURegister& leftover_arg = args[arg_count - 1]; | |
| 4850 const CPURegister& leftover_pcs = pcs[arg_count - 1]; | |
| 4851 if (leftover_arg.IsRegister()) { | |
| 4852 Mov(Register(leftover_pcs), Register(leftover_arg)); | |
| 4853 } else { | |
| 4854 Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg)); | |
| 4855 } | |
| 4856 } | |
| 4857 | |
| 4858 if (arg_count >= 4) { | |
| 4859 Pop(pcs[0], pcs[1], pcs[2], pcs[3]); | |
| 4860 } else if (arg_count >= 2) { | |
| 4861 Pop(pcs[0], pcs[1]); | |
| 4862 } | |
| 4863 | |
| 4864 // Load the format string into x0, as per the procedure-call standard. | |
| 4865 // | |
| 4866 // To make the code as portable as possible, the format string is encoded | |
| 4867 // directly in the instruction stream. It might be cleaner to encode it in a | |
| 4868 // literal pool, but since Printf is usually used for debugging, it is | |
| 4869 // beneficial for it to be minimally dependent on other features. | |
| 4870 Label format_address; | |
| 4871 Adr(x0, &format_address); | |
| 4872 | |
| 4873 // Emit the format string directly in the instruction stream. | |
| 4874 { BlockConstPoolScope scope(this); | |
| 4875 Label after_data; | |
| 4876 B(&after_data); | |
| 4877 Bind(&format_address); | |
| 4878 EmitStringData(format); | |
| 4879 Unreachable(); | |
| 4880 Bind(&after_data); | |
| 4881 } | |
| 4882 | |
| 4883 // We don't pass any arguments on the stack, but we still need to align the C | |
| 4884 // stack pointer to a 16-byte boundary for PCS compliance. | |
| 4885 if (!csp.Is(StackPointer())) { | |
| 4886 Bic(csp, StackPointer(), 0xf); | |
| 4887 } | |
| 4888 | |
| 4889 CallPrintf(pcs[0].type()); | |
| 4890 } | |
| 4891 | |
| 4892 | |
| 4893 void MacroAssembler::CallPrintf(CPURegister::RegisterType type) { | |
| 4894 // A call to printf needs special handling for the simulator, since the system | |
| 4895 // printf function will use a different instruction set and the procedure-call | |
| 4896 // standard will not be compatible. | |
| 4897 #ifdef USE_SIMULATOR | |
| 4898 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize); | |
| 4899 hlt(kImmExceptionIsPrintf); | |
| 4900 dc32(type); | |
| 4901 } | |
| 4902 #else | |
| 4903 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); | |
| 4904 #endif | |
| 4905 } | |
| 4906 | |
| 4907 | |
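// A sketch of a typical call site, for illustration only (the format string
// and register are hypothetical, not taken from this file):
//   __ Printf("x0 is 0x%" PRIx64 "\n", x0);
// Arguments are passed as CPURegisters and the format string is interpreted
// by the C library's printf.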
| 4908 void MacroAssembler::Printf(const char * format, | |
| 4909 const CPURegister& arg0, | |
| 4910 const CPURegister& arg1, | |
| 4911 const CPURegister& arg2, | |
| 4912 const CPURegister& arg3) { | |
| 4913 // Preserve all caller-saved registers as well as NZCV. | |
| 4914 // If csp is the stack pointer, PushCPURegList asserts that the size of each | |
| 4915 // list is a multiple of 16 bytes. | |
| 4916 PushCPURegList(kCallerSaved); | |
| 4917 PushCPURegList(kCallerSavedFP); | |
| 4918 // Use Tmp0() as a scratch register. It is not accepted by Printf so it will | |
| 4919 // never overlap an argument register. | |
| 4920 Mrs(Tmp0(), NZCV); | |
| 4921 Push(Tmp0(), xzr); | |
| 4922 | |
| 4923 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | |
| 4924 | |
| 4925 Pop(xzr, Tmp0()); | |
| 4926 Msr(NZCV, Tmp0()); | |
| 4927 PopCPURegList(kCallerSavedFP); | |
| 4928 PopCPURegList(kCallerSaved); | |
| 4929 } | |
| 4930 | |
| 4931 | |
| 4932 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { | |
| 4933 // TODO(jbramley): Other architectures use the internal memcpy to copy the | |
| 4934 // sequence. If this is a performance bottleneck, we should consider caching | |
| 4935 // the sequence and copying it in the same way. | |
| 4936 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); | |
| 4937 ASSERT(jssp.Is(StackPointer())); | |
| 4938 EmitFrameSetupForCodeAgePatching(this); | |
| 4939 } | |
| 4940 | |
| 4941 | |
| 4943 void MacroAssembler::EmitCodeAgeSequence(Code* stub) { | |
| 4944 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); | |
| 4945 ASSERT(jssp.Is(StackPointer())); | |
| 4946 EmitCodeAgeSequence(this, stub); | |
| 4947 } | |
| 4948 | |
| 4949 | |
| 4950 #undef __ | |
| 4951 #define __ assm-> | |
| 4952 | |
| 4953 | |
| 4954 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { | |
| 4955 Label start; | |
| 4956 __ bind(&start); | |
| 4957 | |
| 4958 // We can do this sequence using four instructions, but the code ageing | |
| 4959 // sequence that patches it needs five, so we use the extra space to try to | |
| 4960 // simplify some addressing modes and remove some dependencies (compared to | |
| 4961 // using two stp instructions with write-back). | |
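  // The stores below lay out a standard frame: x1 and cp at jssp[0] and
  // jssp[8], the caller's fp and lr at jssp[16] and jssp[24], and fp is left
  // pointing at the saved fp slot (assuming kFixedFrameSizeFromFp is
  // 2 * kPointerSize).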
| 4962 __ sub(jssp, jssp, 4 * kXRegSizeInBytes); | |
| 4963 __ sub(csp, csp, 4 * kXRegSizeInBytes); | |
| 4964 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes)); | |
| 4965 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes)); | |
| 4966 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | |
| 4967 | |
| 4968 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); | |
| 4969 } | |
| 4970 | |
| 4971 | |
| 4972 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, | |
| 4973 Code * stub) { | |
| 4974 Label start; | |
| 4975 __ bind(&start); | |
| 4976 // When the stub is called, the sequence is replaced with the young sequence | |
| 4977 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the | |
| 4978 // stub jumps to &start, stored in x0. The young sequence does not call the | |
| 4979 // stub so there is no infinite loop here. | |
| 4980 // | |
| 4981 // A branch (br) is used rather than a call (blr) because this code replaces | |
| 4982 // the frame setup code that would normally preserve lr. | |
| 4983 __ LoadLiteral(ip0, kCodeAgeStubEntryOffset); | |
| 4984 __ adr(x0, &start); | |
| 4985 __ br(ip0); | |
| 4986 // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up | |
| 4987 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences. | |
| 4988 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); | |
| 4989 if (stub) { | |
| 4990 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); | |
| 4991 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); | |
| 4992 } | |
| 4993 } | |
| 4994 | |
| 4995 | |
| 4996 bool MacroAssembler::IsYoungSequence(byte* sequence) { | |
| 4997 // Generate a young sequence to compare with. | |
| 4998 const int length = kCodeAgeSequenceSize / kInstructionSize; | |
| 4999 static bool initialized = false; | |
| 5000 static byte young[kCodeAgeSequenceSize]; | |
| 5001 if (!initialized) { | |
| 5002 PatchingAssembler patcher(young, length); | |
| 5003 // The young sequence is the frame setup code for FUNCTION code types. It is | |
| 5004 // generated by FullCodeGenerator::Generate. | |
| 5005 MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher); | |
| 5006 initialized = true; | |
| 5007 } | |
| 5008 | |
| 5009 bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0); | |
| 5010 ASSERT(is_young || IsCodeAgeSequence(sequence)); | |
| 5011 return is_young; | |
| 5012 } | |
| 5013 | |
| 5014 | |
| 5015 #ifdef DEBUG | |
| 5016 bool MacroAssembler::IsCodeAgeSequence(byte* sequence) { | |
| 5017 // The old sequence varies depending on the code age. However, the code up | |
| 5018 // until kCodeAgeStubEntryOffset does not change, so we can check that part to | |
| 5019 // get a reasonable level of verification. | |
| 5020 const int length = kCodeAgeStubEntryOffset / kInstructionSize; | |
| 5021 static bool initialized = false; | |
| 5022 static byte old[kCodeAgeStubEntryOffset]; | |
| 5023 if (!initialized) { | |
| 5024 PatchingAssembler patcher(old, length); | |
| 5025 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); | |
| 5026 initialized = true; | |
| 5027 } | |
| 5028 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; | |
| 5029 } | |
| 5030 #endif | |
| 5031 | |
| 5032 | |
| 5033 #undef __ | |
| 5034 #define __ masm-> | |
| 5035 | |
| 5036 | |
| 5037 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, | |
| 5038 const Label* smi_check) { | |
| 5039 Assembler::BlockConstPoolScope scope(masm); | |
| 5040 if (reg.IsValid()) { | |
| 5041 ASSERT(smi_check->is_bound()); | |
| 5042 ASSERT(reg.Is64Bits()); | |
| 5043 | |
| 5044 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to | |
| 5045 // 'smi_check' in the other bits. The possible offset is limited in that we | |
| 5046 // use BitField to pack the data, and the underlying data type is a | |
| 5047 // uint32_t. | |
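    // A purely illustrative example: for x3 with the smi check emitted four
    // instructions earlier, the payload would be
    // RegisterBits::encode(3) | DeltaBits::encode(4).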
| 5048 uint32_t delta = __ InstructionsGeneratedSince(smi_check); | |
| 5049 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); | |
| 5050 } else { | |
| 5051 ASSERT(!smi_check->is_bound()); | |
| 5052 | |
| 5053 // An offset of 0 indicates that there is no patch site. | |
| 5054 __ InlineData(0); | |
| 5055 } | |
| 5056 } | |
| 5057 | |
| 5058 | |
| 5059 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info) | |
| 5060 : reg_(NoReg), smi_check_(NULL) { | |
| 5061 InstructionSequence* inline_data = InstructionSequence::At(info); | |
| 5062 ASSERT(inline_data->IsInlineData()); | |
| 5063 if (inline_data->IsInlineData()) { | |
| 5064 uint64_t payload = inline_data->InlineData(); | |
| 5065 // We use BitField to decode the payload, and BitField can only handle | |
| 5066 // 32-bit values. | |
| 5067 ASSERT(is_uint32(payload)); | |
| 5068 if (payload != 0) { | |
| 5069 int reg_code = RegisterBits::decode(payload); | |
| 5070 reg_ = Register::XRegFromCode(reg_code); | |
| 5071 uint64_t smi_check_delta = DeltaBits::decode(payload); | |
| 5072 ASSERT(smi_check_delta != 0); | |
| 5073 smi_check_ = inline_data - (smi_check_delta * kInstructionSize); | |
| 5074 } | |
| 5075 } | |
| 5076 } | |
| 5077 | |
| 5078 | |
| 5079 #undef __ | |
| 5080 | |
| 5081 | |
| 5082 } } // namespace v8::internal | |
| 5083 | |
| 5084 #endif // V8_TARGET_ARCH_A64 | |