| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 35 matching lines...) | |
| 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, | 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, |
| 47 byte * buffer, | 47 byte * buffer, |
| 48 unsigned buffer_size) | 48 unsigned buffer_size) |
| 49 : Assembler(arg_isolate, buffer, buffer_size), | 49 : Assembler(arg_isolate, buffer, buffer_size), |
| 50 generating_stub_(false), | 50 generating_stub_(false), |
| 51 #if DEBUG | 51 #if DEBUG |
| 52 allow_macro_instructions_(true), | 52 allow_macro_instructions_(true), |
| 53 #endif | 53 #endif |
| 54 has_frame_(false), | 54 has_frame_(false), |
| 55 use_real_aborts_(true), | 55 use_real_aborts_(true), |
| 56 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) { | 56 sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) { |
| 57 if (isolate() != NULL) { | 57 if (isolate() != NULL) { |
| 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
| 59 isolate()); | 59 isolate()); |
| 60 } | 60 } |
| 61 } | 61 } |
| 62 | 62 |
| 63 | 63 |
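
The recurring change in this CL replaces the fixed Tmp0()/Tmp1() accessors with UseScratchRegisterScope, which checks temporaries out of tmp_list_ and hands them back automatically at the end of the scope. A minimal standalone sketch of that RAII discipline, with illustrative names rather than the real V8 classes:

    #include <cassert>
    #include <cstdint>

    // A register list as a bitmask of register codes, like CPURegList.
    struct RegList {
      uint64_t bits;
      int PopLowestSetBit() {
        assert(bits != 0);  // Running out of scratch registers is a bug.
        int code = 0;
        while (!(bits & (uint64_t{1} << code))) code++;
        bits &= ~(uint64_t{1} << code);
        return code;
      }
    };

    // RAII scope: acquired registers are released automatically, so nested
    // macro-assembler helpers cannot silently clobber each other's temps
    // the way fixed Tmp0()/Tmp1() users could.
    class ScratchScope {
     public:
      explicit ScratchScope(RegList* available)
          : available_(available), saved_(available->bits) {}
      ~ScratchScope() { available_->bits = saved_; }
      int AcquireCode() { return available_->PopLowestSetBit(); }

     private:
      RegList* available_;
      uint64_t saved_;
    };

    int main() {
      RegList tmp_list{(uint64_t{1} << 16) | (uint64_t{1} << 17)};  // ip0, ip1
      ScratchScope outer(&tmp_list);
      assert(outer.AcquireCode() == 16);      // ip0
      {
        ScratchScope inner(&tmp_list);
        assert(inner.AcquireCode() == 17);    // ip1; ip0 is already taken
      }                                       // ip1 handed back here
      assert(tmp_list.bits == (uint64_t{1} << 17));
    }
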
| 64 void MacroAssembler::LogicalMacro(const Register& rd, | 64 void MacroAssembler::LogicalMacro(const Register& rd, |
| 65 const Register& rn, | 65 const Register& rn, |
| 66 const Operand& operand, | 66 const Operand& operand, |
| 67 LogicalOp op) { | 67 LogicalOp op) { |
| 68 UseScratchRegisterScope temps(this); |
| 69 |
| 68 if (operand.NeedsRelocation()) { | 70 if (operand.NeedsRelocation()) { |
| 69 LoadRelocated(Tmp0(), operand); | 71 Register temp = temps.AcquireX(); |
| 70 Logical(rd, rn, Tmp0(), op); | 72 LoadRelocated(temp, operand); |
| 73 Logical(rd, rn, temp, op); |
| 71 | 74 |
| 72 } else if (operand.IsImmediate()) { | 75 } else if (operand.IsImmediate()) { |
| 73 int64_t immediate = operand.immediate(); | 76 int64_t immediate = operand.immediate(); |
| 74 unsigned reg_size = rd.SizeInBits(); | 77 unsigned reg_size = rd.SizeInBits(); |
| 75 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 78 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
| 76 | 79 |
| 77 // If the operation is NOT, invert the operation and immediate. | 80 // If the operation is NOT, invert the operation and immediate. |
| 78 if ((op & NOT) == NOT) { | 81 if ((op & NOT) == NOT) { |
| 79 op = static_cast<LogicalOp>(op & ~NOT); | 82 op = static_cast<LogicalOp>(op & ~NOT); |
| 80 immediate = ~immediate; | 83 immediate = ~immediate; |
| (...skipping 37 matching lines...) | |
| 118 UNREACHABLE(); | 121 UNREACHABLE(); |
| 119 } | 122 } |
| 120 } | 123 } |
| 121 | 124 |
| 122 unsigned n, imm_s, imm_r; | 125 unsigned n, imm_s, imm_r; |
| 123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | 126 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
| 124 // Immediate can be encoded in the instruction. | 127 // Immediate can be encoded in the instruction. |
| 125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 128 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
| 126 } else { | 129 } else { |
| 127 // Immediate can't be encoded: synthesize using move immediate. | 130 // Immediate can't be encoded: synthesize using move immediate. |
| 128 Register temp = AppropriateTempFor(rn); | 131 Register temp = temps.AcquireSameSizeAs(rn); |
| 129 Mov(temp, immediate); | 132 Mov(temp, immediate); |
| 130 if (rd.Is(csp)) { | 133 if (rd.Is(csp)) { |
| 131 // If rd is the stack pointer we cannot use it as the destination | 134 // If rd is the stack pointer we cannot use it as the destination |
| 132 // register so we use the temp register as an intermediate again. | 135 // register so we use the temp register as an intermediate again. |
| 133 Logical(temp, rn, temp, op); | 136 Logical(temp, rn, temp, op); |
| 134 Mov(csp, temp); | 137 Mov(csp, temp); |
| 135 } else { | 138 } else { |
| 136 Logical(rd, rn, temp, op); | 139 Logical(rd, rn, temp, op); |
| 137 } | 140 } |
| 138 } | 141 } |
| 139 | 142 |
| 140 } else if (operand.IsExtendedRegister()) { | 143 } else if (operand.IsExtendedRegister()) { |
| 141 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 144 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 142 // Add/sub extended supports shift <= 4. We want to support exactly the | 145 // Add/sub extended supports shift <= 4. We want to support exactly the |
| 143 // same modes here. | 146 // same modes here. |
| 144 ASSERT(operand.shift_amount() <= 4); | 147 ASSERT(operand.shift_amount() <= 4); |
| 145 ASSERT(operand.reg().Is64Bits() || | 148 ASSERT(operand.reg().Is64Bits() || |
| 146 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 149 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 147 Register temp = AppropriateTempFor(rn, operand.reg()); | 150 Register temp = temps.AcquireSameSizeAs(rn); |
| 148 EmitExtendShift(temp, operand.reg(), operand.extend(), | 151 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 149 operand.shift_amount()); | 152 operand.shift_amount()); |
| 150 Logical(rd, rn, temp, op); | 153 Logical(rd, rn, temp, op); |
| 151 | 154 |
| 152 } else { | 155 } else { |
| 153 // The operand can be encoded in the instruction. | 156 // The operand can be encoded in the instruction. |
| 154 ASSERT(operand.IsShiftedRegister()); | 157 ASSERT(operand.IsShiftedRegister()); |
| 155 Logical(rd, rn, operand, op); | 158 Logical(rd, rn, operand, op); |
| 156 } | 159 } |
| 157 } | 160 } |
| (...skipping 43 matching lines...) | |
| 201 uint64_t ignored_halfword = 0; | 204 uint64_t ignored_halfword = 0; |
| 202 bool invert_move = false; | 205 bool invert_move = false; |
| 203 // If the number of 0xffff halfwords is greater than the number of 0x0000 | 206 // If the number of 0xffff halfwords is greater than the number of 0x0000 |
| 204 // halfwords, it's more efficient to use move-inverted. | 207 // halfwords, it's more efficient to use move-inverted. |
| 205 if (CountClearHalfWords(~imm, reg_size) > | 208 if (CountClearHalfWords(~imm, reg_size) > |
| 206 CountClearHalfWords(imm, reg_size)) { | 209 CountClearHalfWords(imm, reg_size)) { |
| 207 ignored_halfword = 0xffffL; | 210 ignored_halfword = 0xffffL; |
| 208 invert_move = true; | 211 invert_move = true; |
| 209 } | 212 } |
| 210 | 213 |
| 211 // Mov instructions can't move value into the stack pointer, so set up a | 214 // Mov instructions can't move immediate values into the stack pointer, so |
| 212 // temporary register, if needed. | 215 // set up a temporary register, if needed. |
| 213 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd; | 216 UseScratchRegisterScope temps(this); |
| 217 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; |
| 214 | 218 |
| 215 // Iterate through the halfwords. Use movn/movz for the first non-ignored | 219 // Iterate through the halfwords. Use movn/movz for the first non-ignored |
| 216 // halfword, and movk for subsequent halfwords. | 220 // halfword, and movk for subsequent halfwords. |
| 217 ASSERT((reg_size % 16) == 0); | 221 ASSERT((reg_size % 16) == 0); |
| 218 bool first_mov_done = false; | 222 bool first_mov_done = false; |
| 219 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { | 223 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { |
| 220 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; | 224 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; |
| 221 if (imm16 != ignored_halfword) { | 225 if (imm16 != ignored_halfword) { |
| 222 if (!first_mov_done) { | 226 if (!first_mov_done) { |
| 223 if (invert_move) { | 227 if (invert_move) { |
| (...skipping 17 matching lines...) | |
| 241 } | 245 } |
| 242 } | 246 } |
| 243 } | 247 } |
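
The halfword bookkeeping that drives the movz/movn choice above is easy to check in isolation. A standalone sketch mirroring CountClearHalfWords() and IsImmMovz() from later in this file, with illustrative example values:

    #include <cassert>
    #include <cstdint>

    static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
      unsigned count = 0;
      for (unsigned i = 0; i < (reg_size / 16); i++) {
        if ((imm & 0xffff) == 0) count++;
        imm >>= 16;
      }
      return count;
    }

    // Encodable as a single movz if all but (at most) one halfword is clear.
    static bool IsImmMovz(uint64_t imm, unsigned reg_size) {
      return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
    }

    int main() {
      assert(IsImmMovz(0x0000123400000000, 64));      // one movz, LSL #32
      assert(IsImmMovz(~0xffff1234ffffffffULL, 64));  // i.e. movn-encodable
      // 0xffffffff00001234: two 0xffff halfwords vs. one 0x0000 halfword, so
      // building from all-ones (movn + one movk) beats movz + two movk.
      uint64_t imm = 0xffffffff00001234;
      assert(CountClearHalfWords(~imm, 64) > CountClearHalfWords(imm, 64));
    }
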
| 244 | 248 |
| 245 | 249 |
| 246 void MacroAssembler::Mov(const Register& rd, | 250 void MacroAssembler::Mov(const Register& rd, |
| 247 const Operand& operand, | 251 const Operand& operand, |
| 248 DiscardMoveMode discard_mode) { | 252 DiscardMoveMode discard_mode) { |
| 249 ASSERT(allow_macro_instructions_); | 253 ASSERT(allow_macro_instructions_); |
| 250 ASSERT(!rd.IsZero()); | 254 ASSERT(!rd.IsZero()); |
| 255 |
| 251 // Provide a swap register for instructions that need to write into the | 256 // Provide a swap register for instructions that need to write into the |
| 252 // system stack pointer (and can't do this inherently). | 257 // system stack pointer (and can't do this inherently). |
| 253 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd); | 258 UseScratchRegisterScope temps(this); |
| 259 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; |
| 254 | 260 |
| 255 if (operand.NeedsRelocation()) { | 261 if (operand.NeedsRelocation()) { |
| 256 LoadRelocated(dst, operand); | 262 LoadRelocated(dst, operand); |
| 257 | 263 |
| 258 } else if (operand.IsImmediate()) { | 264 } else if (operand.IsImmediate()) { |
| 259 // Call the macro assembler for generic immediates. | 265 // Call the macro assembler for generic immediates. |
| 260 Mov(dst, operand.immediate()); | 266 Mov(dst, operand.immediate()); |
| 261 | 267 |
| 262 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 268 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 263 // Emit a shift instruction if moving a shifted register. This operation | 269 // Emit a shift instruction if moving a shifted register. This operation |
| (...skipping 20 matching lines...) | |
| 284 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && | 290 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && |
| 285 (discard_mode == kDontDiscardForSameWReg))) { | 291 (discard_mode == kDontDiscardForSameWReg))) { |
| 286 Assembler::mov(rd, operand.reg()); | 292 Assembler::mov(rd, operand.reg()); |
| 287 } | 293 } |
| 288 // This case can handle writes into the system stack pointer directly. | 294 // This case can handle writes into the system stack pointer directly. |
| 289 dst = rd; | 295 dst = rd; |
| 290 } | 296 } |
| 291 | 297 |
| 292 // Copy the result to the system stack pointer. | 298 // Copy the result to the system stack pointer. |
| 293 if (!dst.Is(rd)) { | 299 if (!dst.Is(rd)) { |
| 294 ASSERT(rd.IsZero()); | 300 ASSERT(rd.IsSP()); |
| 295 ASSERT(dst.Is(Tmp1())); | |
| 296 Assembler::mov(rd, dst); | 301 Assembler::mov(rd, dst); |
| 297 } | 302 } |
| 298 } | 303 } |
| 299 | 304 |
| 300 | 305 |
| 301 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 306 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
| 302 ASSERT(allow_macro_instructions_); | 307 ASSERT(allow_macro_instructions_); |
| 303 | 308 |
| 304 if (operand.NeedsRelocation()) { | 309 if (operand.NeedsRelocation()) { |
| 305 LoadRelocated(Tmp0(), operand); | 310 LoadRelocated(rd, operand); |
| 306 Mvn(rd, Tmp0()); | 311 mvn(rd, rd); |
| 307 | 312 |
| 308 } else if (operand.IsImmediate()) { | 313 } else if (operand.IsImmediate()) { |
| 309 // Call the macro assembler for generic immediates. | 314 // Call the macro assembler for generic immediates. |
| 310 Mov(rd, ~operand.immediate()); | 315 Mov(rd, ~operand.immediate()); |
| 311 | 316 |
| 312 } else if (operand.IsExtendedRegister()) { | 317 } else if (operand.IsExtendedRegister()) { |
| 313 // Emit two instructions for the extend case. This differs from Mov, as | 318 // Emit two instructions for the extend case. This differs from Mov, as |
| 314 // the extend and invert can't be achieved in one instruction. | 319 // the extend and invert can't be achieved in one instruction. |
| 315 Register temp = AppropriateTempFor(rd, operand.reg()); | 320 EmitExtendShift(rd, operand.reg(), operand.extend(), |
| 316 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
| 317 operand.shift_amount()); | 321 operand.shift_amount()); |
| 318 mvn(rd, temp); | 322 mvn(rd, rd); |
| 319 | 323 |
| 320 } else { | 324 } else { |
| 321 // Otherwise, emit a register move only if the registers are distinct. | |
| 322 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0. | |
| 323 mvn(rd, operand); | 325 mvn(rd, operand); |
| 324 } | 326 } |
| 325 } | 327 } |
| 326 | 328 |
| 327 | 329 |
| 328 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { | 330 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { |
| 329 ASSERT((reg_size % 8) == 0); | 331 ASSERT((reg_size % 8) == 0); |
| 330 int count = 0; | 332 int count = 0; |
| 331 for (unsigned i = 0; i < (reg_size / 16); i++) { | 333 for (unsigned i = 0; i < (reg_size / 16); i++) { |
| 332 if ((imm & 0xffff) == 0) { | 334 if ((imm & 0xffff) == 0) { |
| 333 count++; | 335 count++; |
| 334 } | 336 } |
| 335 imm >>= 16; | 337 imm >>= 16; |
| 336 } | 338 } |
| 337 return count; | 339 return count; |
| 338 } | 340 } |
| 339 | 341 |
| 340 | 342 |
| 341 // The movz instruction can generate immediates containing an arbitrary 16-bit | 343 // The movz instruction can generate immediates containing an arbitrary 16-bit |
| 342 // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. | 344 // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. |
| 343 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { | 345 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { |
| 344 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); | 346 ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); |
| 345 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); | 347 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); |
| 346 } | 348 } |
| 347 | 349 |
| 348 | 350 |
| 349 // The movn instruction can generate immediates containing an arbitrary 16-bit | 351 // The movn instruction can generate immediates containing an arbitrary 16-bit |
| 350 // half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff. | 352 // half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff. |
| 351 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { | 353 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { |
| 352 return IsImmMovz(~imm, reg_size); | 354 return IsImmMovz(~imm, reg_size); |
| 353 } | 355 } |
| 354 | 356 |
| 355 | 357 |
| 356 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | 358 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
| 357 const Operand& operand, | 359 const Operand& operand, |
| 358 StatusFlags nzcv, | 360 StatusFlags nzcv, |
| 359 Condition cond, | 361 Condition cond, |
| 360 ConditionalCompareOp op) { | 362 ConditionalCompareOp op) { |
| 361 ASSERT((cond != al) && (cond != nv)); | 363 ASSERT((cond != al) && (cond != nv)); |
| 362 if (operand.NeedsRelocation()) { | 364 if (operand.NeedsRelocation()) { |
| 363 LoadRelocated(Tmp0(), operand); | 365 UseScratchRegisterScope temps(this); |
| 364 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op); | 366 Register temp = temps.AcquireX(); |
| 367 LoadRelocated(temp, operand); |
| 368 ConditionalCompareMacro(rn, temp, nzcv, cond, op); |
| 365 | 369 |
| 366 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | 370 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
| 367 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | 371 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
| 368 // The immediate can be encoded in the instruction, or the operand is an | 372 // The immediate can be encoded in the instruction, or the operand is an |
| 369 // unshifted register: call the assembler. | 373 // unshifted register: call the assembler. |
| 370 ConditionalCompare(rn, operand, nzcv, cond, op); | 374 ConditionalCompare(rn, operand, nzcv, cond, op); |
| 371 | 375 |
| 372 } else { | 376 } else { |
| 373 // The operand isn't directly supported by the instruction: perform the | 377 // The operand isn't directly supported by the instruction: perform the |
| 374 // operation on a temporary register. | 378 // operation on a temporary register. |
| 375 Register temp = AppropriateTempFor(rn); | 379 UseScratchRegisterScope temps(this); |
| 380 Register temp = temps.AcquireSameSizeAs(rn); |
| 376 Mov(temp, operand); | 381 Mov(temp, operand); |
| 377 ConditionalCompare(rn, temp, nzcv, cond, op); | 382 ConditionalCompare(rn, temp, nzcv, cond, op); |
| 378 } | 383 } |
| 379 } | 384 } |
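
The IsImmConditionalCompare() gate reflects the fact that ccmp/ccmn only accept a 5-bit unsigned immediate; anything else is materialized with Mov() into a scratch register first. A sketch of the predicate under that encoding assumption (not the V8 helper itself):

    #include <cassert>
    #include <cstdint>

    bool IsImmConditionalCompare(int64_t imm) {
      return (imm >= 0) && (imm < 32);  // uimm5 field of ccmp/ccmn
    }

    int main() {
      assert(IsImmConditionalCompare(31));
      assert(!IsImmConditionalCompare(32));  // falls back to a scratch reg
      assert(!IsImmConditionalCompare(-1));
    }
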
| 380 | 385 |
| 381 | 386 |
| 382 void MacroAssembler::Csel(const Register& rd, | 387 void MacroAssembler::Csel(const Register& rd, |
| 383 const Register& rn, | 388 const Register& rn, |
| 384 const Operand& operand, | 389 const Operand& operand, |
| 385 Condition cond) { | 390 Condition cond) { |
| 386 ASSERT(allow_macro_instructions_); | 391 ASSERT(allow_macro_instructions_); |
| 387 ASSERT(!rd.IsZero()); | 392 ASSERT(!rd.IsZero()); |
| 388 ASSERT((cond != al) && (cond != nv)); | 393 ASSERT((cond != al) && (cond != nv)); |
| 389 if (operand.IsImmediate()) { | 394 if (operand.IsImmediate()) { |
| 390 // Immediate argument. Handle special cases of 0, 1 and -1 using zero | 395 // Immediate argument. Handle special cases of 0, 1 and -1 using zero |
| 391 // register. | 396 // register. |
| 392 int64_t imm = operand.immediate(); | 397 int64_t imm = operand.immediate(); |
| 393 Register zr = AppropriateZeroRegFor(rn); | 398 Register zr = AppropriateZeroRegFor(rn); |
| 394 if (imm == 0) { | 399 if (imm == 0) { |
| 395 csel(rd, rn, zr, cond); | 400 csel(rd, rn, zr, cond); |
| 396 } else if (imm == 1) { | 401 } else if (imm == 1) { |
| 397 csinc(rd, rn, zr, cond); | 402 csinc(rd, rn, zr, cond); |
| 398 } else if (imm == -1) { | 403 } else if (imm == -1) { |
| 399 csinv(rd, rn, zr, cond); | 404 csinv(rd, rn, zr, cond); |
| 400 } else { | 405 } else { |
| 401 Register temp = AppropriateTempFor(rn); | 406 UseScratchRegisterScope temps(this); |
| 407 Register temp = temps.AcquireSameSizeAs(rn); |
| 402 Mov(temp, operand.immediate()); | 408 Mov(temp, operand.immediate()); |
| 403 csel(rd, rn, temp, cond); | 409 csel(rd, rn, temp, cond); |
| 404 } | 410 } |
| 405 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { | 411 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { |
| 406 // Unshifted register argument. | 412 // Unshifted register argument. |
| 407 csel(rd, rn, operand.reg(), cond); | 413 csel(rd, rn, operand.reg(), cond); |
| 408 } else { | 414 } else { |
| 409 // All other arguments. | 415 // All other arguments. |
| 410 Register temp = AppropriateTempFor(rn); | 416 UseScratchRegisterScope temps(this); |
| 417 Register temp = temps.AcquireSameSizeAs(rn); |
| 411 Mov(temp, operand); | 418 Mov(temp, operand); |
| 412 csel(rd, rn, temp, cond); | 419 csel(rd, rn, temp, cond); |
| 413 } | 420 } |
| 414 } | 421 } |
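
The 0, 1 and -1 special cases work because csinc and csinv yield rm + 1 and ~rm when the condition fails, and the zero register supplies rm == 0. A small C++ model of the three conditional-select forms, showing why those immediates need no temporary register:

    #include <cassert>
    #include <cstdint>

    // cond ? rn : f(rm), per the ARM64 conditional-select family.
    int64_t csel (bool cond, int64_t rn, int64_t rm) { return cond ? rn : rm;     }
    int64_t csinc(bool cond, int64_t rn, int64_t rm) { return cond ? rn : rm + 1; }
    int64_t csinv(bool cond, int64_t rn, int64_t rm) { return cond ? rn : ~rm;    }

    int main() {
      const int64_t zr = 0;  // the zero register
      // With rm = zr, the "condition false" result is exactly 0, 1 or -1.
      assert(csel (false, 42, zr) ==  0);
      assert(csinc(false, 42, zr) ==  1);
      assert(csinv(false, 42, zr) == -1);
      assert(csel (true,  42, zr) == 42);
    }
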
| 415 | 422 |
| 416 | 423 |
| 417 void MacroAssembler::AddSubMacro(const Register& rd, | 424 void MacroAssembler::AddSubMacro(const Register& rd, |
| 418 const Register& rn, | 425 const Register& rn, |
| 419 const Operand& operand, | 426 const Operand& operand, |
| 420 FlagsUpdate S, | 427 FlagsUpdate S, |
| 421 AddSubOp op) { | 428 AddSubOp op) { |
| 422 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 429 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
| 423 !operand.NeedsRelocation() && (S == LeaveFlags)) { | 430 !operand.NeedsRelocation() && (S == LeaveFlags)) { |
| 424 // The instruction would be a nop. Avoid generating useless code. | 431 // The instruction would be a nop. Avoid generating useless code. |
| 425 return; | 432 return; |
| 426 } | 433 } |
| 427 | 434 |
| 428 if (operand.NeedsRelocation()) { | 435 if (operand.NeedsRelocation()) { |
| 429 LoadRelocated(Tmp0(), operand); | 436 UseScratchRegisterScope temps(this); |
| 430 AddSubMacro(rd, rn, Tmp0(), S, op); | 437 Register temp = temps.AcquireX(); |
| 438 LoadRelocated(temp, operand); |
| 439 AddSubMacro(rd, rn, temp, S, op); |
| 431 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | 440 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
| 432 (rn.IsZero() && !operand.IsShiftedRegister()) || | 441 (rn.IsZero() && !operand.IsShiftedRegister()) || |
| 433 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 442 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 434 Register temp = AppropriateTempFor(rn); | 443 UseScratchRegisterScope temps(this); |
| 444 Register temp = temps.AcquireSameSizeAs(rn); |
| 435 Mov(temp, operand); | 445 Mov(temp, operand); |
| 436 AddSub(rd, rn, temp, S, op); | 446 AddSub(rd, rn, temp, S, op); |
| 437 } else { | 447 } else { |
| 438 AddSub(rd, rn, operand, S, op); | 448 AddSub(rd, rn, operand, S, op); |
| 439 } | 449 } |
| 440 } | 450 } |
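
The IsImmAddSub() test corresponds to the add/sub immediate encoding: a 12-bit value, optionally shifted left by 12. A sketch of the predicate under that assumption:

    #include <cassert>
    #include <cstdint>

    static bool is_uint12(int64_t x) { return (x >= 0) && (x < (1 << 12)); }

    // Fits the add/sub immediate field directly, or with LSL #12; anything
    // else is materialized into a scratch register first.
    static bool IsImmAddSub(int64_t imm) {
      return is_uint12(imm) || (is_uint12(imm >> 12) && ((imm & 0xfff) == 0));
    }

    int main() {
      assert(IsImmAddSub(0xfff));     // fits directly
      assert(IsImmAddSub(0xabc000));  // fits with LSL #12
      assert(!IsImmAddSub(0x1001));   // neither; needs Mov + register form
    }
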
| 441 | 451 |
| 442 | 452 |
| 443 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 453 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
| 444 const Register& rn, | 454 const Register& rn, |
| 445 const Operand& operand, | 455 const Operand& operand, |
| 446 FlagsUpdate S, | 456 FlagsUpdate S, |
| 447 AddSubWithCarryOp op) { | 457 AddSubWithCarryOp op) { |
| 448 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 458 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 459 UseScratchRegisterScope temps(this); |
| 449 | 460 |
| 450 if (operand.NeedsRelocation()) { | 461 if (operand.NeedsRelocation()) { |
| 451 LoadRelocated(Tmp0(), operand); | 462 Register temp = temps.AcquireX(); |
| 452 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op); | 463 LoadRelocated(temp, operand); |
| 464 AddSubWithCarryMacro(rd, rn, temp, S, op); |
| 453 | 465 |
| 454 } else if (operand.IsImmediate() || | 466 } else if (operand.IsImmediate() || |
| 455 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 467 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 456 // Add/sub with carry (immediate or ROR shifted register.) | 468 // Add/sub with carry (immediate or ROR shifted register.) |
| 457 Register temp = AppropriateTempFor(rn); | 469 Register temp = temps.AcquireSameSizeAs(rn); |
| 458 Mov(temp, operand); | 470 Mov(temp, operand); |
| 459 AddSubWithCarry(rd, rn, temp, S, op); | 471 AddSubWithCarry(rd, rn, temp, S, op); |
| 472 |
| 460 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 473 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 461 // Add/sub with carry (shifted register). | 474 // Add/sub with carry (shifted register). |
| 462 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 475 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 463 ASSERT(operand.shift() != ROR); | 476 ASSERT(operand.shift() != ROR); |
| 464 ASSERT(is_uintn(operand.shift_amount(), | 477 ASSERT(is_uintn(operand.shift_amount(), |
| 465 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); | 478 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2 |
| 466 Register temp = AppropriateTempFor(rn, operand.reg()); | 479 : kWRegSizeInBitsLog2)); |
| 480 Register temp = temps.AcquireSameSizeAs(rn); |
| 467 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); | 481 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); |
| 468 AddSubWithCarry(rd, rn, temp, S, op); | 482 AddSubWithCarry(rd, rn, temp, S, op); |
| 469 | 483 |
| 470 } else if (operand.IsExtendedRegister()) { | 484 } else if (operand.IsExtendedRegister()) { |
| 471 // Add/sub with carry (extended register). | 485 // Add/sub with carry (extended register). |
| 472 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 486 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 473 // Add/sub extended supports a shift <= 4. We want to support exactly the | 487 // Add/sub extended supports a shift <= 4. We want to support exactly the |
| 474 // same modes. | 488 // same modes. |
| 475 ASSERT(operand.shift_amount() <= 4); | 489 ASSERT(operand.shift_amount() <= 4); |
| 476 ASSERT(operand.reg().Is64Bits() || | 490 ASSERT(operand.reg().Is64Bits() || |
| 477 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 491 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 478 Register temp = AppropriateTempFor(rn, operand.reg()); | 492 Register temp = temps.AcquireSameSizeAs(rn); |
| 479 EmitExtendShift(temp, operand.reg(), operand.extend(), | 493 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 480 operand.shift_amount()); | 494 operand.shift_amount()); |
| 481 AddSubWithCarry(rd, rn, temp, S, op); | 495 AddSubWithCarry(rd, rn, temp, S, op); |
| 482 | 496 |
| 483 } else { | 497 } else { |
| 484 // The addressing mode is directly supported by the instruction. | 498 // The addressing mode is directly supported by the instruction. |
| 485 AddSubWithCarry(rd, rn, operand, S, op); | 499 AddSubWithCarry(rd, rn, operand, S, op); |
| 486 } | 500 } |
| 487 } | 501 } |
| 488 | 502 |
| 489 | 503 |
| 490 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, | 504 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, |
| 491 const MemOperand& addr, | 505 const MemOperand& addr, |
| 492 LoadStoreOp op) { | 506 LoadStoreOp op) { |
| 493 int64_t offset = addr.offset(); | 507 int64_t offset = addr.offset(); |
| 494 LSDataSize size = CalcLSDataSize(op); | 508 LSDataSize size = CalcLSDataSize(op); |
| 495 | 509 |
| 496 // Check if an immediate offset fits in the immediate field of the | 510 // Check if an immediate offset fits in the immediate field of the |
| 497 // appropriate instruction. If not, emit two instructions to perform | 511 // appropriate instruction. If not, emit two instructions to perform |
| 498 // the operation. | 512 // the operation. |
| 499 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && | 513 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && |
| 500 !IsImmLSUnscaled(offset)) { | 514 !IsImmLSUnscaled(offset)) { |
| 501 // Immediate offset that can't be encoded using unsigned or unscaled | 515 // Immediate offset that can't be encoded using unsigned or unscaled |
| 502 // addressing modes. | 516 // addressing modes. |
| 503 Register temp = AppropriateTempFor(addr.base()); | 517 UseScratchRegisterScope temps(this); |
| 518 Register temp = temps.AcquireSameSizeAs(addr.base()); |
| 504 Mov(temp, addr.offset()); | 519 Mov(temp, addr.offset()); |
| 505 LoadStore(rt, MemOperand(addr.base(), temp), op); | 520 LoadStore(rt, MemOperand(addr.base(), temp), op); |
| 506 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { | 521 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { |
| 507 // Post-index beyond unscaled addressing range. | 522 // Post-index beyond unscaled addressing range. |
| 508 LoadStore(rt, MemOperand(addr.base()), op); | 523 LoadStore(rt, MemOperand(addr.base()), op); |
| 509 add(addr.base(), addr.base(), offset); | 524 add(addr.base(), addr.base(), offset); |
| 510 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { | 525 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { |
| 511 // Pre-index beyond unscaled addressing range. | 526 // Pre-index beyond unscaled addressing range. |
| 512 add(addr.base(), addr.base(), offset); | 527 add(addr.base(), addr.base(), offset); |
| 513 LoadStore(rt, MemOperand(addr.base()), op); | 528 LoadStore(rt, MemOperand(addr.base()), op); |
| (...skipping 37 matching lines...) | |
| 551 Strh(rt, addr); | 566 Strh(rt, addr); |
| 552 } else if (r.IsInteger32()) { | 567 } else if (r.IsInteger32()) { |
| 553 Str(rt.W(), addr); | 568 Str(rt.W(), addr); |
| 554 } else { | 569 } else { |
| 555 ASSERT(rt.Is64Bits()); | 570 ASSERT(rt.Is64Bits()); |
| 556 Str(rt, addr); | 571 Str(rt, addr); |
| 557 } | 572 } |
| 558 } | 573 } |
| 559 | 574 |
| 560 | 575 |
| 561 bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { | |
| 562 // Account for the branch around the veneers and the guard. | |
| 563 int protection_offset = 2 * kInstructionSize; | |
| 564 return pc_offset() > max_reachable_pc - margin - protection_offset - | |
| 565 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); | |
| 566 } | |
| 567 | |
| 568 | |
| 569 void MacroAssembler::EmitVeneers(bool need_protection) { | |
| 570 RecordComment("[ Veneers"); | |
| 571 | |
| 572 Label end; | |
| 573 if (need_protection) { | |
| 574 B(&end); | |
| 575 } | |
| 576 | |
| 577 EmitVeneersGuard(); | |
| 578 | |
| 579 { | |
| 580 InstructionAccurateScope scope(this); | |
| 581 Label size_check; | |
| 582 | |
| 583 std::multimap<int, FarBranchInfo>::iterator it, it_to_delete; | |
| 584 | |
| 585 it = unresolved_branches_.begin(); | |
| 586 while (it != unresolved_branches_.end()) { | |
| 587 if (ShouldEmitVeneer(it->first)) { | |
| 588 Instruction* branch = InstructionAt(it->second.pc_offset_); | |
| 589 Label* label = it->second.label_; | |
| 590 | |
| 591 #ifdef DEBUG | |
| 592 __ bind(&size_check); | |
| 593 #endif | |
| 594 // Patch the branch to point to the current position, and emit a branch | |
| 595 // to the label. | |
| 596 Instruction* veneer = reinterpret_cast<Instruction*>(pc_); | |
| 597 RemoveBranchFromLabelLinkChain(branch, label, veneer); | |
| 598 branch->SetImmPCOffsetTarget(veneer); | |
| 599 b(label); | |
| 600 #ifdef DEBUG | |
| 601 ASSERT(SizeOfCodeGeneratedSince(&size_check) <= | |
| 602 static_cast<uint64_t>(kMaxVeneerCodeSize)); | |
| 603 size_check.Unuse(); | |
| 604 #endif | |
| 605 | |
| 606 it_to_delete = it++; | |
| 607 unresolved_branches_.erase(it_to_delete); | |
| 608 } else { | |
| 609 ++it; | |
| 610 } | |
| 611 } | |
| 612 } | |
| 613 | |
| 614 Bind(&end); | |
| 615 | |
| 616 RecordComment("]"); | |
| 617 } | |
| 618 | |
| 619 | |
| 620 void MacroAssembler::EmitVeneersGuard() { | |
| 621 if (emit_debug_code()) { | |
| 622 Unreachable(); | |
| 623 } | |
| 624 } | |
| 625 | |
| 626 | |
| 627 void MacroAssembler::CheckVeneers(bool need_protection) { | |
| 628 if (unresolved_branches_.empty()) { | |
| 629 return; | |
| 630 } | |
| 631 | |
| 632 CHECK(pc_offset() < unresolved_branches_first_limit()); | |
| 633 int margin = kVeneerDistanceMargin; | |
| 634 if (!need_protection) { | |
| 635 // Prefer emitting veneers protected by an existing instruction. | |
| 636 // The 4 divisor is a finger in the air guess. With a default margin of 2KB, | |
| 637 // that leaves 512B = 128 instructions of extra margin to avoid requiring a | |
| 638 // protective branch. | |
| 639 margin += margin / 4; | |
| 640 } | |
| 641 if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) { | |
| 642 EmitVeneers(need_protection); | |
| 643 } | |
| 644 } | |
| 645 | |
| 646 | |
| 647 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( | 576 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( |
| 648 Label *label, ImmBranchType b_type) { | 577 Label *label, ImmBranchType b_type) { |
| 649 bool need_longer_range = false; | 578 bool need_longer_range = false; |
| 650 // There are two situations in which we care about the offset being out of | 579 // There are two situations in which we care about the offset being out of |
| 651 // range: | 580 // range: |
| 652 // - The label is bound but too far away. | 581 // - The label is bound but too far away. |
| 653 // - The label is not bound but linked, and the previous branch | 582 // - The label is not bound but linked, and the previous branch |
| 654 // instruction in the chain is too far away. | 583 // instruction in the chain is too far away. |
| 655 if (label->is_bound() || label->is_linked()) { | 584 if (label->is_bound() || label->is_linked()) { |
| 656 need_longer_range = | 585 need_longer_range = |
| 657 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset()); | 586 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset()); |
| 658 } | 587 } |
| 659 if (!need_longer_range && !label->is_bound()) { | 588 if (!need_longer_range && !label->is_bound()) { |
| 660 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type); | 589 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type); |
| 661 unresolved_branches_.insert( | 590 unresolved_branches_.insert( |
| 662 std::pair<int, FarBranchInfo>(max_reachable_pc, | 591 std::pair<int, FarBranchInfo>(max_reachable_pc, |
| 663 FarBranchInfo(pc_offset(), label))); | 592 FarBranchInfo(pc_offset(), label))); |
| 593 // Also maintain the next pool check. |
| 594 next_veneer_pool_check_ = |
| 595 Min(next_veneer_pool_check_, |
| 596 max_reachable_pc - kVeneerDistanceCheckMargin); |
| 664 } | 597 } |
| 665 return need_longer_range; | 598 return need_longer_range; |
| 666 } | 599 } |
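
Whether the cheap single-instruction branch is usable comes down to the PC-relative reach of each branch form; the new next_veneer_pool_check_ bookkeeping makes sure the veneer pool is emitted before any unresolved branch drifts out of range. Approximate reach, sketched with illustrative constants rather than the exact V8 helpers:

    #include <cassert>
    #include <cstdint>

    enum ImmBranchType { CondBranchType, CompareBranchType, TestBranchType };

    int64_t ImmBranchRange(ImmBranchType type) {
      // tbz/tbnz carry a 14-bit signed offset, b.cond/cbz/cbnz a 19-bit
      // one, both in units of 4-byte instructions.
      int bits = (type == TestBranchType) ? 14 : 19;
      return ((int64_t{1} << (bits - 1)) - 1) * 4;
    }

    bool IsValidImmPCOffset(ImmBranchType type, int64_t offset) {
      if (offset < 0) offset = -offset;
      return offset <= ImmBranchRange(type);
    }

    int main() {
      assert(IsValidImmPCOffset(TestBranchType, 32 * 1024 - 4));    // ~±32KB
      assert(!IsValidImmPCOffset(TestBranchType, 32 * 1024));
      assert(IsValidImmPCOffset(CondBranchType, 1024 * 1024 - 4));  // ~±1MB
    }
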
| 667 | 600 |
| 668 | 601 |
| 669 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { | 602 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { |
| 670 ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && | 603 ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && |
| 671 (bit == -1 || type >= kBranchTypeFirstUsingBit)); | 604 (bit == -1 || type >= kBranchTypeFirstUsingBit)); |
| 672 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { | 605 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { |
| 673 B(static_cast<Condition>(type), label); | 606 B(static_cast<Condition>(type), label); |
| (...skipping 15 matching lines...) Expand all Loading... |
| 689 void MacroAssembler::B(Label* label, Condition cond) { | 622 void MacroAssembler::B(Label* label, Condition cond) { |
| 690 ASSERT(allow_macro_instructions_); | 623 ASSERT(allow_macro_instructions_); |
| 691 ASSERT((cond != al) && (cond != nv)); | 624 ASSERT((cond != al) && (cond != nv)); |
| 692 | 625 |
| 693 Label done; | 626 Label done; |
| 694 bool need_extra_instructions = | 627 bool need_extra_instructions = |
| 695 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); | 628 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); |
| 696 | 629 |
| 697 if (need_extra_instructions) { | 630 if (need_extra_instructions) { |
| 698 b(&done, InvertCondition(cond)); | 631 b(&done, InvertCondition(cond)); |
| 699 b(label); | 632 B(label); |
| 700 } else { | 633 } else { |
| 701 b(label, cond); | 634 b(label, cond); |
| 702 } | 635 } |
| 703 CheckVeneers(!need_extra_instructions); | |
| 704 bind(&done); | 636 bind(&done); |
| 705 } | 637 } |
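
When the label may be out of range, the macro branches over an unconditional branch on the inverted condition (note the new capitalized B(label), so the long branch itself can go through the veneer machinery). Inverting an ARM64 condition is just flipping the low bit of its encoding; a sketch:

    #include <cassert>

    // ARM64 condition codes come in complementary pairs differing only in
    // the low bit: eq/ne = 0/1, hs/lo = 2/3, ..., gt/le = 12/13.
    enum Condition {
      eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5, vs = 6, vc = 7,
      hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, nv = 15
    };

    Condition InvertCondition(Condition cond) {
      assert((cond != al) && (cond != nv));  // No meaningful inverse.
      return static_cast<Condition>(cond ^ 1);
    }

    int main() {
      assert(InvertCondition(eq) == ne);
      assert(InvertCondition(lt) == ge);
    }
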
| 706 | 638 |
| 707 | 639 |
| 708 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { | 640 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { |
| 709 ASSERT(allow_macro_instructions_); | 641 ASSERT(allow_macro_instructions_); |
| 710 | 642 |
| 711 Label done; | 643 Label done; |
| 712 bool need_extra_instructions = | 644 bool need_extra_instructions = |
| 713 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); | 645 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); |
| 714 | 646 |
| 715 if (need_extra_instructions) { | 647 if (need_extra_instructions) { |
| 716 tbz(rt, bit_pos, &done); | 648 tbz(rt, bit_pos, &done); |
| 717 b(label); | 649 B(label); |
| 718 } else { | 650 } else { |
| 719 tbnz(rt, bit_pos, label); | 651 tbnz(rt, bit_pos, label); |
| 720 } | 652 } |
| 721 CheckVeneers(!need_extra_instructions); | |
| 722 bind(&done); | 653 bind(&done); |
| 723 } | 654 } |
| 724 | 655 |
| 725 | 656 |
| 726 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { | 657 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { |
| 727 ASSERT(allow_macro_instructions_); | 658 ASSERT(allow_macro_instructions_); |
| 728 | 659 |
| 729 Label done; | 660 Label done; |
| 730 bool need_extra_instructions = | 661 bool need_extra_instructions = |
| 731 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); | 662 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); |
| 732 | 663 |
| 733 if (need_extra_instructions) { | 664 if (need_extra_instructions) { |
| 734 tbnz(rt, bit_pos, &done); | 665 tbnz(rt, bit_pos, &done); |
| 735 b(label); | 666 B(label); |
| 736 } else { | 667 } else { |
| 737 tbz(rt, bit_pos, label); | 668 tbz(rt, bit_pos, label); |
| 738 } | 669 } |
| 739 CheckVeneers(!need_extra_instructions); | |
| 740 bind(&done); | 670 bind(&done); |
| 741 } | 671 } |
| 742 | 672 |
| 743 | 673 |
| 744 void MacroAssembler::Cbnz(const Register& rt, Label* label) { | 674 void MacroAssembler::Cbnz(const Register& rt, Label* label) { |
| 745 ASSERT(allow_macro_instructions_); | 675 ASSERT(allow_macro_instructions_); |
| 746 | 676 |
| 747 Label done; | 677 Label done; |
| 748 bool need_extra_instructions = | 678 bool need_extra_instructions = |
| 749 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); | 679 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); |
| 750 | 680 |
| 751 if (need_extra_instructions) { | 681 if (need_extra_instructions) { |
| 752 cbz(rt, &done); | 682 cbz(rt, &done); |
| 753 b(label); | 683 B(label); |
| 754 } else { | 684 } else { |
| 755 cbnz(rt, label); | 685 cbnz(rt, label); |
| 756 } | 686 } |
| 757 CheckVeneers(!need_extra_instructions); | |
| 758 bind(&done); | 687 bind(&done); |
| 759 } | 688 } |
| 760 | 689 |
| 761 | 690 |
| 762 void MacroAssembler::Cbz(const Register& rt, Label* label) { | 691 void MacroAssembler::Cbz(const Register& rt, Label* label) { |
| 763 ASSERT(allow_macro_instructions_); | 692 ASSERT(allow_macro_instructions_); |
| 764 | 693 |
| 765 Label done; | 694 Label done; |
| 766 bool need_extra_instructions = | 695 bool need_extra_instructions = |
| 767 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); | 696 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); |
| 768 | 697 |
| 769 if (need_extra_instructions) { | 698 if (need_extra_instructions) { |
| 770 cbnz(rt, &done); | 699 cbnz(rt, &done); |
| 771 b(label); | 700 B(label); |
| 772 } else { | 701 } else { |
| 773 cbz(rt, label); | 702 cbz(rt, label); |
| 774 } | 703 } |
| 775 CheckVeneers(!need_extra_instructions); | |
| 776 bind(&done); | 704 bind(&done); |
| 777 } | 705 } |
| 778 | 706 |
| 779 | 707 |
| 780 // Pseudo-instructions. | 708 // Pseudo-instructions. |
| 781 | 709 |
| 782 | 710 |
| 783 void MacroAssembler::Abs(const Register& rd, const Register& rm, | 711 void MacroAssembler::Abs(const Register& rd, const Register& rm, |
| 784 Label* is_not_representable, | 712 Label* is_not_representable, |
| 785 Label* is_representable) { | 713 Label* is_representable) { |
| (...skipping 150 matching lines...) | |
| 936 } | 864 } |
| 937 } | 865 } |
| 938 | 866 |
| 939 | 867 |
| 940 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | 868 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { |
| 941 int size = src.SizeInBytes(); | 869 int size = src.SizeInBytes(); |
| 942 | 870 |
| 943 PrepareForPush(count, size); | 871 PrepareForPush(count, size); |
| 944 | 872 |
| 945 if (FLAG_optimize_for_size && count > 8) { | 873 if (FLAG_optimize_for_size && count > 8) { |
| 874 UseScratchRegisterScope temps(this); |
| 875 Register temp = temps.AcquireX(); |
| 876 |
| 946 Label loop; | 877 Label loop; |
| 947 __ Mov(Tmp0(), count / 2); | 878 __ Mov(temp, count / 2); |
| 948 __ Bind(&loop); | 879 __ Bind(&loop); |
| 949 PushHelper(2, size, src, src, NoReg, NoReg); | 880 PushHelper(2, size, src, src, NoReg, NoReg); |
| 950 __ Subs(Tmp0(), Tmp0(), 1); | 881 __ Subs(temp, temp, 1); |
| 951 __ B(ne, &loop); | 882 __ B(ne, &loop); |
| 952 | 883 |
| 953 count %= 2; | 884 count %= 2; |
| 954 } | 885 } |
| 955 | 886 |
| 956 // Push up to four registers at a time if possible because if the current | 887 // Push up to four registers at a time if possible because if the current |
| 957 // stack pointer is csp and the register size is 32, registers must be pushed | 888 // stack pointer is csp and the register size is 32, registers must be pushed |
| 958 // in blocks of four in order to maintain the 16-byte alignment for csp. | 889 // in blocks of four in order to maintain the 16-byte alignment for csp. |
| 959 while (count >= 4) { | 890 while (count >= 4) { |
| 960 PushHelper(4, size, src, src, src, src); | 891 PushHelper(4, size, src, src, src, src); |
| 961 count -= 4; | 892 count -= 4; |
| 962 } | 893 } |
| 963 if (count >= 2) { | 894 if (count >= 2) { |
| 964 PushHelper(2, size, src, src, NoReg, NoReg); | 895 PushHelper(2, size, src, src, NoReg, NoReg); |
| 965 count -= 2; | 896 count -= 2; |
| 966 } | 897 } |
| 967 if (count == 1) { | 898 if (count == 1) { |
| 968 PushHelper(1, size, src, NoReg, NoReg, NoReg); | 899 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
| 969 count -= 1; | 900 count -= 1; |
| 970 } | 901 } |
| 971 ASSERT(count == 0); | 902 ASSERT(count == 0); |
| 972 } | 903 } |
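
The tail of this routine decomposes whatever count remains into blocks of four, two and one, since pushing four registers at a time keeps csp 16-byte aligned even for W registers. The decomposition in isolation (illustrative only):

    #include <cassert>

    int main() {
      int count = 11;  // e.g. eleven pushes of the same register
      int fours = 0, twos = 0, ones = 0;
      while (count >= 4) { fours++; count -= 4; }
      if (count >= 2)    { twos++;  count -= 2; }
      if (count == 1)    { ones++;  count -= 1; }
      assert(count == 0);
      assert(fours == 2 && twos == 1 && ones == 1);  // 4+4+2+1 == 11
    }
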
| 973 | 904 |
| 974 | 905 |
| 975 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | 906 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { |
| 976 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | 907 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); |
| 977 | 908 |
| 978 Register temp = AppropriateTempFor(count); | 909 UseScratchRegisterScope temps(this); |
| 910 Register temp = temps.AcquireSameSizeAs(count); |
| 979 | 911 |
| 980 if (FLAG_optimize_for_size) { | 912 if (FLAG_optimize_for_size) { |
| 981 Label loop, done; | 913 Label loop, done; |
| 982 | 914 |
| 983 Subs(temp, count, 1); | 915 Subs(temp, count, 1); |
| 984 B(mi, &done); | 916 B(mi, &done); |
| 985 | 917 |
| 986 // Push all registers individually, to save code size. | 918 // Push all registers individually, to save code size. |
| 987 Bind(&loop); | 919 Bind(&loop); |
| 988 Subs(temp, temp, 1); | 920 Subs(temp, temp, 1); |
| (...skipping 190 matching lines...) | |
| 1179 | 1111 |
| 1180 | 1112 |
| 1181 void MacroAssembler::PushCalleeSavedRegisters() { | 1113 void MacroAssembler::PushCalleeSavedRegisters() { |
| 1182 // Ensure that the macro-assembler doesn't use any scratch registers. | 1114 // Ensure that the macro-assembler doesn't use any scratch registers. |
| 1183 InstructionAccurateScope scope(this); | 1115 InstructionAccurateScope scope(this); |
| 1184 | 1116 |
| 1185 // This method must not be called unless the current stack pointer is the | 1117 // This method must not be called unless the current stack pointer is the |
| 1186 // system stack pointer (csp). | 1118 // system stack pointer (csp). |
| 1187 ASSERT(csp.Is(StackPointer())); | 1119 ASSERT(csp.Is(StackPointer())); |
| 1188 | 1120 |
| 1189 MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex); | 1121 MemOperand tos(csp, -2 * kXRegSize, PreIndex); |
| 1190 | 1122 |
| 1191 stp(d14, d15, tos); | 1123 stp(d14, d15, tos); |
| 1192 stp(d12, d13, tos); | 1124 stp(d12, d13, tos); |
| 1193 stp(d10, d11, tos); | 1125 stp(d10, d11, tos); |
| 1194 stp(d8, d9, tos); | 1126 stp(d8, d9, tos); |
| 1195 | 1127 |
| 1196 stp(x29, x30, tos); | 1128 stp(x29, x30, tos); |
| 1197 stp(x27, x28, tos); // x28 = jssp | 1129 stp(x27, x28, tos); // x28 = jssp |
| 1198 stp(x25, x26, tos); | 1130 stp(x25, x26, tos); |
| 1199 stp(x23, x24, tos); | 1131 stp(x23, x24, tos); |
| 1200 stp(x21, x22, tos); | 1132 stp(x21, x22, tos); |
| 1201 stp(x19, x20, tos); | 1133 stp(x19, x20, tos); |
| 1202 } | 1134 } |
| 1203 | 1135 |
| 1204 | 1136 |
| 1205 void MacroAssembler::PopCalleeSavedRegisters() { | 1137 void MacroAssembler::PopCalleeSavedRegisters() { |
| 1206 // Ensure that the macro-assembler doesn't use any scratch registers. | 1138 // Ensure that the macro-assembler doesn't use any scratch registers. |
| 1207 InstructionAccurateScope scope(this); | 1139 InstructionAccurateScope scope(this); |
| 1208 | 1140 |
| 1209 // This method must not be called unless the current stack pointer is the | 1141 // This method must not be called unless the current stack pointer is the |
| 1210 // system stack pointer (csp). | 1142 // system stack pointer (csp). |
| 1211 ASSERT(csp.Is(StackPointer())); | 1143 ASSERT(csp.Is(StackPointer())); |
| 1212 | 1144 |
| 1213 MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex); | 1145 MemOperand tos(csp, 2 * kXRegSize, PostIndex); |
| 1214 | 1146 |
| 1215 ldp(x19, x20, tos); | 1147 ldp(x19, x20, tos); |
| 1216 ldp(x21, x22, tos); | 1148 ldp(x21, x22, tos); |
| 1217 ldp(x23, x24, tos); | 1149 ldp(x23, x24, tos); |
| 1218 ldp(x25, x26, tos); | 1150 ldp(x25, x26, tos); |
| 1219 ldp(x27, x28, tos); // x28 = jssp | 1151 ldp(x27, x28, tos); // x28 = jssp |
| 1220 ldp(x29, x30, tos); | 1152 ldp(x29, x30, tos); |
| 1221 | 1153 |
| 1222 ldp(d8, d9, tos); | 1154 ldp(d8, d9, tos); |
| 1223 ldp(d10, d11, tos); | 1155 ldp(d10, d11, tos); |
| (...skipping 188 matching lines...) | |
| 1412 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); | 1344 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); |
| 1413 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); | 1345 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); |
| 1414 Br(scratch1); | 1346 Br(scratch1); |
| 1415 } | 1347 } |
| 1416 | 1348 |
| 1417 | 1349 |
| 1418 void MacroAssembler::InNewSpace(Register object, | 1350 void MacroAssembler::InNewSpace(Register object, |
| 1419 Condition cond, | 1351 Condition cond, |
| 1420 Label* branch) { | 1352 Label* branch) { |
| 1421 ASSERT(cond == eq || cond == ne); | 1353 ASSERT(cond == eq || cond == ne); |
| 1422 // Use Tmp1() to have a different destination register, as Tmp0() will be used | 1354 UseScratchRegisterScope temps(this); |
| 1423 // for relocation. | 1355 Register temp = temps.AcquireX(); |
| 1424 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate()))); | 1356 And(temp, object, Operand(ExternalReference::new_space_mask(isolate()))); |
| 1425 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate()))); | 1357 Cmp(temp, Operand(ExternalReference::new_space_start(isolate()))); |
| 1426 B(cond, branch); | 1358 B(cond, branch); |
| 1427 } | 1359 } |
| 1428 | 1360 |
| 1429 | 1361 |
| 1430 void MacroAssembler::Throw(Register value, | 1362 void MacroAssembler::Throw(Register value, |
| 1431 Register scratch1, | 1363 Register scratch1, |
| 1432 Register scratch2, | 1364 Register scratch2, |
| 1433 Register scratch3, | 1365 Register scratch3, |
| 1434 Register scratch4) { | 1366 Register scratch4) { |
| 1435 // Adjust this code if not the case. | 1367 // Adjust this code if not the case. |
| (...skipping 141 matching lines...) | |
| 1577 if (emit_debug_code()) { | 1509 if (emit_debug_code()) { |
| 1578 STATIC_ASSERT(kSmiTag == 0); | 1510 STATIC_ASSERT(kSmiTag == 0); |
| 1579 Tst(object, kSmiTagMask); | 1511 Tst(object, kSmiTagMask); |
| 1580 Check(ne, reason); | 1512 Check(ne, reason); |
| 1581 } | 1513 } |
| 1582 } | 1514 } |
| 1583 | 1515 |
| 1584 | 1516 |
| 1585 void MacroAssembler::AssertName(Register object) { | 1517 void MacroAssembler::AssertName(Register object) { |
| 1586 if (emit_debug_code()) { | 1518 if (emit_debug_code()) { |
| 1587 STATIC_ASSERT(kSmiTag == 0); | 1519 AssertNotSmi(object, kOperandIsASmiAndNotAName); |
| 1588 // TODO(jbramley): Add AbortIfSmi and related functions. | |
| 1589 Label not_smi; | |
| 1590 JumpIfNotSmi(object, ¬_smi); | |
| 1591 Abort(kOperandIsASmiAndNotAName); | |
| 1592 Bind(¬_smi); | |
| 1593 | 1520 |
| 1594 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset)); | 1521 UseScratchRegisterScope temps(this); |
| 1595 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE); | 1522 Register temp = temps.AcquireX(); |
| 1523 |
| 1524 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1525 CompareInstanceType(temp, temp, LAST_NAME_TYPE); |
| 1596 Check(ls, kOperandIsNotAName); | 1526 Check(ls, kOperandIsNotAName); |
| 1597 } | 1527 } |
| 1598 } | 1528 } |
| 1599 | 1529 |
| 1600 | 1530 |
| 1601 void MacroAssembler::AssertString(Register object) { | 1531 void MacroAssembler::AssertString(Register object) { |
| 1602 if (emit_debug_code()) { | 1532 if (emit_debug_code()) { |
| 1603 Register temp = Tmp1(); | 1533 UseScratchRegisterScope temps(this); |
| 1534 Register temp = temps.AcquireX(); |
| 1604 STATIC_ASSERT(kSmiTag == 0); | 1535 STATIC_ASSERT(kSmiTag == 0); |
| 1605 Tst(object, kSmiTagMask); | 1536 Tst(object, kSmiTagMask); |
| 1606 Check(ne, kOperandIsASmiAndNotAString); | 1537 Check(ne, kOperandIsASmiAndNotAString); |
| 1607 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 1538 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1608 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 1539 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 1609 Check(lo, kOperandIsNotAString); | 1540 Check(lo, kOperandIsNotAString); |
| 1610 } | 1541 } |
| 1611 } | 1542 } |
| 1612 | 1543 |
| 1613 | 1544 |
| (...skipping 69 matching lines...) | |
| 1683 Mov(x3, Operand(thunk_ref)); | 1614 Mov(x3, Operand(thunk_ref)); |
| 1684 B(&end_profiler_check); | 1615 B(&end_profiler_check); |
| 1685 | 1616 |
| 1686 Bind(&profiler_disabled); | 1617 Bind(&profiler_disabled); |
| 1687 Mov(x3, function_address); | 1618 Mov(x3, function_address); |
| 1688 Bind(&end_profiler_check); | 1619 Bind(&end_profiler_check); |
| 1689 | 1620 |
| 1690 // Save the callee-save registers we are going to use. | 1621 // Save the callee-save registers we are going to use. |
| 1691 // TODO(all): Is this necessary? ARM doesn't do it. | 1622 // TODO(all): Is this necessary? ARM doesn't do it. |
| 1692 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); | 1623 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); |
| 1693 Poke(x19, (spill_offset + 0) * kXRegSizeInBytes); | 1624 Poke(x19, (spill_offset + 0) * kXRegSize); |
| 1694 Poke(x20, (spill_offset + 1) * kXRegSizeInBytes); | 1625 Poke(x20, (spill_offset + 1) * kXRegSize); |
| 1695 Poke(x21, (spill_offset + 2) * kXRegSizeInBytes); | 1626 Poke(x21, (spill_offset + 2) * kXRegSize); |
| 1696 Poke(x22, (spill_offset + 3) * kXRegSizeInBytes); | 1627 Poke(x22, (spill_offset + 3) * kXRegSize); |
| 1697 | 1628 |
| 1698 // Allocate HandleScope in callee-save registers. | 1629 // Allocate HandleScope in callee-save registers. |
| 1699 // We will need to restore the HandleScope after the call to the API function, | 1630 // We will need to restore the HandleScope after the call to the API function, |
| 1700 // by allocating it in callee-save registers they will be preserved by C code. | 1631 // by allocating it in callee-save registers they will be preserved by C code. |
| 1701 Register handle_scope_base = x22; | 1632 Register handle_scope_base = x22; |
| 1702 Register next_address_reg = x19; | 1633 Register next_address_reg = x19; |
| 1703 Register limit_reg = x20; | 1634 Register limit_reg = x20; |
| 1704 Register level_reg = w21; | 1635 Register level_reg = w21; |
| 1705 | 1636 |
| 1706 Mov(handle_scope_base, Operand(next_address)); | 1637 Mov(handle_scope_base, Operand(next_address)); |
| (...skipping 43 matching lines...) | |
| 1750 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 1681 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); |
| 1751 } | 1682 } |
| 1752 Sub(level_reg, level_reg, 1); | 1683 Sub(level_reg, level_reg, 1); |
| 1753 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | 1684 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); |
| 1754 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); | 1685 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); |
| 1755 Cmp(limit_reg, x1); | 1686 Cmp(limit_reg, x1); |
| 1756 B(ne, &delete_allocated_handles); | 1687 B(ne, &delete_allocated_handles); |
| 1757 | 1688 |
| 1758 Bind(&leave_exit_frame); | 1689 Bind(&leave_exit_frame); |
| 1759 // Restore callee-saved registers. | 1690 // Restore callee-saved registers. |
| 1760 Peek(x19, (spill_offset + 0) * kXRegSizeInBytes); | 1691 Peek(x19, (spill_offset + 0) * kXRegSize); |
| 1761 Peek(x20, (spill_offset + 1) * kXRegSizeInBytes); | 1692 Peek(x20, (spill_offset + 1) * kXRegSize); |
| 1762 Peek(x21, (spill_offset + 2) * kXRegSizeInBytes); | 1693 Peek(x21, (spill_offset + 2) * kXRegSize); |
| 1763 Peek(x22, (spill_offset + 3) * kXRegSizeInBytes); | 1694 Peek(x22, (spill_offset + 3) * kXRegSize); |
| 1764 | 1695 |
| 1765 // Check if the function scheduled an exception. | 1696 // Check if the function scheduled an exception. |
| 1766 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); | 1697 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
| 1767 Ldr(x5, MemOperand(x5)); | 1698 Ldr(x5, MemOperand(x5)); |
| 1768 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); | 1699 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); |
| 1769 Bind(&exception_handled); | 1700 Bind(&exception_handled); |
| 1770 | 1701 |
| 1771 bool restore_context = context_restore_operand != NULL; | 1702 bool restore_context = context_restore_operand != NULL; |
| 1772 if (restore_context) { | 1703 if (restore_context) { |
| 1773 Ldr(cp, *context_restore_operand); | 1704 Ldr(cp, *context_restore_operand); |
| (...skipping 46 matching lines...) | |
| 1820 Builtins::JavaScript id) { | 1751 Builtins::JavaScript id) { |
| 1821 // Load the builtins object into target register. | 1752 // Load the builtins object into target register. |
| 1822 Ldr(target, GlobalObjectMemOperand()); | 1753 Ldr(target, GlobalObjectMemOperand()); |
| 1823 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | 1754 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
| 1824 // Load the JavaScript builtin function from the builtins object. | 1755 // Load the JavaScript builtin function from the builtins object. |
| 1825 Ldr(target, FieldMemOperand(target, | 1756 Ldr(target, FieldMemOperand(target, |
| 1826 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 1757 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
| 1827 } | 1758 } |
| 1828 | 1759 |
| 1829 | 1760 |
| 1830 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 1761 void MacroAssembler::GetBuiltinEntry(Register target, |
| 1831 ASSERT(!target.is(x1)); | 1762 Register function, |
| 1832 GetBuiltinFunction(x1, id); | 1763 Builtins::JavaScript id) { |
| 1764 ASSERT(!AreAliased(target, function)); |
| 1765 GetBuiltinFunction(function, id); |
| 1833 // Load the code entry point from the builtins object. | 1766 // Load the code entry point from the builtins object. |
| 1834 Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); | 1767 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
| 1835 } | 1768 } |
| 1836 | 1769 |
| 1837 | 1770 |
| 1838 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 1771 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 1839 InvokeFlag flag, | 1772 InvokeFlag flag, |
| 1840 const CallWrapper& call_wrapper) { | 1773 const CallWrapper& call_wrapper) { |
| 1841 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); | 1774 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); |
| 1842 // You can't call a builtin without a valid frame. | 1775 // You can't call a builtin without a valid frame. |
| 1843 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1776 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
| 1844 | 1777 |
| 1845 GetBuiltinEntry(x2, id); | 1778 // Get the builtin entry in x2 and set up the function object in x1. |
| 1779 GetBuiltinEntry(x2, x1, id); |
| 1846 if (flag == CALL_FUNCTION) { | 1780 if (flag == CALL_FUNCTION) { |
| 1847 call_wrapper.BeforeCall(CallSize(x2)); | 1781 call_wrapper.BeforeCall(CallSize(x2)); |
| 1848 Call(x2); | 1782 Call(x2); |
| 1849 call_wrapper.AfterCall(); | 1783 call_wrapper.AfterCall(); |
| 1850 } else { | 1784 } else { |
| 1851 ASSERT(flag == JUMP_FUNCTION); | 1785 ASSERT(flag == JUMP_FUNCTION); |
| 1852 Jump(x2); | 1786 Jump(x2); |
| 1853 } | 1787 } |
| 1854 } | 1788 } |
| 1855 | 1789 |
| (...skipping 54 matching lines...) |
| 1910 | 1844 |
| 1911 void MacroAssembler::CallCFunction(ExternalReference function, | 1845 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1912 int num_of_reg_args) { | 1846 int num_of_reg_args) { |
| 1913 CallCFunction(function, num_of_reg_args, 0); | 1847 CallCFunction(function, num_of_reg_args, 0); |
| 1914 } | 1848 } |
| 1915 | 1849 |
| 1916 | 1850 |
| 1917 void MacroAssembler::CallCFunction(ExternalReference function, | 1851 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1918 int num_of_reg_args, | 1852 int num_of_reg_args, |
| 1919 int num_of_double_args) { | 1853 int num_of_double_args) { |
| 1920 Mov(Tmp0(), Operand(function)); | 1854 UseScratchRegisterScope temps(this); |
| 1921 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args); | 1855 Register temp = temps.AcquireX(); |
| 1856 Mov(temp, Operand(function)); |
| 1857 CallCFunction(temp, num_of_reg_args, num_of_double_args); |
| 1922 } | 1858 } |
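This function shows the pattern the patch applies throughout the file: instead of reserving the fixed Tmp0()/Tmp1() pair, a UseScratchRegisterScope borrows registers from the assembler's tmp_list_ and hands them back when it goes out of scope, so nested code cannot silently clobber a live temporary. A much-simplified C++ sketch of that RAII shape (ScratchScope and its members are illustrative, not the real V8 class):

    #include <cstdint>

    // Simplified model: the assembler owns a bitmask of free scratch
    // registers (tmp_list_); a scope hands them out and its destructor
    // returns everything it acquired.
    class ScratchScope {
     public:
      explicit ScratchScope(uint64_t* available)
          : available_(available), old_available_(*available) {}
      ~ScratchScope() { *available_ = old_available_; }  // release on exit

      int AcquireIndex() {
        uint64_t bits = *available_;
        int index = 0;
        // Claim the lowest free register; the real code asserts that one
        // is available rather than looping blindly.
        while (((bits >> index) & 1) == 0) index++;
        *available_ = bits & ~(1ULL << index);
        return index;
      }

     private:
      uint64_t* available_;
      uint64_t old_available_;
    };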
| 1923 | 1859 |
| 1924 | 1860 |
| 1925 void MacroAssembler::CallCFunction(Register function, | 1861 void MacroAssembler::CallCFunction(Register function, |
| 1926 int num_of_reg_args, | 1862 int num_of_reg_args, |
| 1927 int num_of_double_args) { | 1863 int num_of_double_args) { |
| 1928 ASSERT(has_frame()); | 1864 ASSERT(has_frame()); |
| 1929 // We can pass 8 integer arguments in registers. If we need to pass more than | 1865 // We can pass 8 integer arguments in registers. If we need to pass more than |
| 1930 // that, we'll need to implement support for passing them on the stack. | 1866 // that, we'll need to implement support for passing them on the stack. |
| 1931 ASSERT(num_of_reg_args <= 8); | 1867 ASSERT(num_of_reg_args <= 8); |
| (...skipping 32 matching lines...) |
| 1964 | 1900 |
| 1965 // Call directly. The function called cannot cause a GC, or allow preemption, | 1901 // Call directly. The function called cannot cause a GC, or allow preemption, |
| 1966 // so the return address in the link register stays correct. | 1902 // so the return address in the link register stays correct. |
| 1967 Call(function); | 1903 Call(function); |
| 1968 | 1904 |
| 1969 if (!csp.Is(old_stack_pointer)) { | 1905 if (!csp.Is(old_stack_pointer)) { |
| 1970 if (emit_debug_code()) { | 1906 if (emit_debug_code()) { |
| 1971 // Because the stack pointer must be aligned on a 16-byte boundary, the | 1907 // Because the stack pointer must be aligned on a 16-byte boundary, the |
| 1972 // aligned csp can be up to 12 bytes below the jssp. This is the case | 1908 // aligned csp can be up to 12 bytes below the jssp. This is the case |
| 1973 // where we only pushed one W register on top of an aligned jssp. | 1909 // where we only pushed one W register on top of an aligned jssp. |
| 1974 Register temp = Tmp1(); | 1910 UseScratchRegisterScope temps(this); |
| 1911 Register temp = temps.AcquireX(); |
| 1975 ASSERT(ActivationFrameAlignment() == 16); | 1912 ASSERT(ActivationFrameAlignment() == 16); |
| 1976 Sub(temp, csp, old_stack_pointer); | 1913 Sub(temp, csp, old_stack_pointer); |
| 1977 // We want temp <= 0 && temp >= -12. | 1914 // We want temp <= 0 && temp >= -12. |
| 1978 Cmp(temp, 0); | 1915 Cmp(temp, 0); |
| 1979 Ccmp(temp, -12, NFlag, le); | 1916 Ccmp(temp, -12, NFlag, le); |
| 1980 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); | 1917 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); |
| 1981 } | 1918 } |
| 1982 SetStackPointer(old_stack_pointer); | 1919 SetStackPointer(old_stack_pointer); |
| 1983 } | 1920 } |
| 1984 } | 1921 } |
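A note on the debug check above: Ccmp(temp, -12, NFlag, le) performs the second comparison only when the first produced le; otherwise it forces the N flag, which makes Check(ge) fail. The net effect is a branchless assertion that the delta lies in [-12, 0]. A minimal C++ sketch of the same invariant (function and parameter names are illustrative):

    #include <cassert>
    #include <cstdint>

    // csp may be realigned up to 12 bytes below jssp -- e.g. a single W
    // register pushed on top of an aligned jssp -- so only deltas in
    // [-12, 0] are legal.
    void CheckStackDelta(uint64_t csp, uint64_t old_stack_pointer) {
      int64_t delta = static_cast<int64_t>(csp - old_stack_pointer);
      assert(delta <= 0 && delta >= -12);  // Cmp + Ccmp + Check(ge)
    }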
| 1985 | 1922 |
| 1986 | 1923 |
| 1987 void MacroAssembler::Jump(Register target) { | 1924 void MacroAssembler::Jump(Register target) { |
| 1988 Br(target); | 1925 Br(target); |
| 1989 } | 1926 } |
| 1990 | 1927 |
| 1991 | 1928 |
| 1992 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { | 1929 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { |
| 1993 Mov(Tmp0(), Operand(target, rmode)); | 1930 UseScratchRegisterScope temps(this); |
| 1994 Br(Tmp0()); | 1931 Register temp = temps.AcquireX(); |
| 1932 Mov(temp, Operand(target, rmode)); |
| 1933 Br(temp); |
| 1995 } | 1934 } |
| 1996 | 1935 |
| 1997 | 1936 |
| 1998 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { | 1937 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { |
| 1999 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 1938 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 2000 Jump(reinterpret_cast<intptr_t>(target), rmode); | 1939 Jump(reinterpret_cast<intptr_t>(target), rmode); |
| 2001 } | 1940 } |
| 2002 | 1941 |
| 2003 | 1942 |
| 2004 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { | 1943 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { |
| 2005 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 1944 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 2006 AllowDeferredHandleDereference embedding_raw_address; | 1945 AllowDeferredHandleDereference embedding_raw_address; |
| 2007 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); | 1946 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); |
| 2008 } | 1947 } |
| 2009 | 1948 |
| 2010 | 1949 |
| 2011 void MacroAssembler::Call(Register target) { | 1950 void MacroAssembler::Call(Register target) { |
| 2012 BlockConstPoolScope scope(this); | 1951 BlockPoolsScope scope(this); |
| 2013 #ifdef DEBUG | 1952 #ifdef DEBUG |
| 2014 Label start_call; | 1953 Label start_call; |
| 2015 Bind(&start_call); | 1954 Bind(&start_call); |
| 2016 #endif | 1955 #endif |
| 2017 | 1956 |
| 2018 Blr(target); | 1957 Blr(target); |
| 2019 | 1958 |
| 2020 #ifdef DEBUG | 1959 #ifdef DEBUG |
| 2021 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | 1960 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); |
| 2022 #endif | 1961 #endif |
| 2023 } | 1962 } |
| 2024 | 1963 |
| 2025 | 1964 |
| 2026 void MacroAssembler::Call(Label* target) { | 1965 void MacroAssembler::Call(Label* target) { |
| 2027 BlockConstPoolScope scope(this); | 1966 BlockPoolsScope scope(this); |
| 2028 #ifdef DEBUG | 1967 #ifdef DEBUG |
| 2029 Label start_call; | 1968 Label start_call; |
| 2030 Bind(&start_call); | 1969 Bind(&start_call); |
| 2031 #endif | 1970 #endif |
| 2032 | 1971 |
| 2033 Bl(target); | 1972 Bl(target); |
| 2034 | 1973 |
| 2035 #ifdef DEBUG | 1974 #ifdef DEBUG |
| 2036 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | 1975 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); |
| 2037 #endif | 1976 #endif |
| 2038 } | 1977 } |
| 2039 | 1978 |
| 2040 | 1979 |
| 2041 // MacroAssembler::CallSize is sensitive to changes in this function, as it | 1980 // MacroAssembler::CallSize is sensitive to changes in this function, as it |
| 2042 // needs to know how many instructions are used to branch to the target. | 1981 // needs to know how many instructions are used to branch to the target. |
| 2043 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { | 1982 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { |
| 2044 BlockConstPoolScope scope(this); | 1983 BlockPoolsScope scope(this); |
| 2045 #ifdef DEBUG | 1984 #ifdef DEBUG |
| 2046 Label start_call; | 1985 Label start_call; |
| 2047 Bind(&start_call); | 1986 Bind(&start_call); |
| 2048 #endif | 1987 #endif |
| 2049 // Statement positions are expected to be recorded when the target | 1988 // Statement positions are expected to be recorded when the target |
| 2050 // address is loaded. | 1989 // address is loaded. |
| 2051 positions_recorder()->WriteRecordedPositions(); | 1990 positions_recorder()->WriteRecordedPositions(); |
| 2052 | 1991 |
| 2053 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | 1992 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
| 2054 ASSERT(rmode != RelocInfo::NONE32); | 1993 ASSERT(rmode != RelocInfo::NONE32); |
| 2055 | 1994 |
| 1995 UseScratchRegisterScope temps(this); |
| 1996 Register temp = temps.AcquireX(); |
| 1997 |
| 2056 if (rmode == RelocInfo::NONE64) { | 1998 if (rmode == RelocInfo::NONE64) { |
| 2057 uint64_t imm = reinterpret_cast<uint64_t>(target); | 1999 uint64_t imm = reinterpret_cast<uint64_t>(target); |
| 2058 movz(Tmp0(), (imm >> 0) & 0xffff, 0); | 2000 movz(temp, (imm >> 0) & 0xffff, 0); |
| 2059 movk(Tmp0(), (imm >> 16) & 0xffff, 16); | 2001 movk(temp, (imm >> 16) & 0xffff, 16); |
| 2060 movk(Tmp0(), (imm >> 32) & 0xffff, 32); | 2002 movk(temp, (imm >> 32) & 0xffff, 32); |
| 2061 movk(Tmp0(), (imm >> 48) & 0xffff, 48); | 2003 movk(temp, (imm >> 48) & 0xffff, 48); |
| 2062 } else { | 2004 } else { |
| 2063 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode)); | 2005 LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
| 2064 } | 2006 } |
| 2065 Blr(Tmp0()); | 2007 Blr(temp); |
| 2066 #ifdef DEBUG | 2008 #ifdef DEBUG |
| 2067 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); | 2009 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); |
| 2068 #endif | 2010 #endif |
| 2069 } | 2011 } |
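The NONE64 path materializes the address with a fixed four-instruction movz/movk sequence rather than a plain Mov precisely so the emitted size stays predictable for CallSize. A hedged C++ model of how the four 16-bit halfwords reassemble the immediate:

    #include <cassert>
    #include <cstdint>

    uint64_t BuildImm64(uint64_t imm) {
      uint64_t reg = (imm >> 0) & 0xffff;     // movz temp, #..., lsl #0
      reg |= ((imm >> 16) & 0xffff) << 16;    // movk temp, #..., lsl #16
      reg |= ((imm >> 32) & 0xffff) << 32;    // movk temp, #..., lsl #32
      reg |= ((imm >> 48) & 0xffff) << 48;    // movk temp, #..., lsl #48
      assert(reg == imm);  // the sequence always reconstructs imm exactly
      return reg;
    }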
| 2070 | 2012 |
| 2071 | 2013 |
| 2072 void MacroAssembler::Call(Handle<Code> code, | 2014 void MacroAssembler::Call(Handle<Code> code, |
| 2073 RelocInfo::Mode rmode, | 2015 RelocInfo::Mode rmode, |
| 2074 TypeFeedbackId ast_id) { | 2016 TypeFeedbackId ast_id) { |
| 2075 #ifdef DEBUG | 2017 #ifdef DEBUG |
| (...skipping 60 matching lines...) |
| 2136 | 2078 |
| 2137 | 2079 |
| 2138 | 2080 |
| 2139 | 2081 |
| 2140 | 2082 |
| 2141 void MacroAssembler::JumpForHeapNumber(Register object, | 2083 void MacroAssembler::JumpForHeapNumber(Register object, |
| 2142 Register heap_number_map, | 2084 Register heap_number_map, |
| 2143 Label* on_heap_number, | 2085 Label* on_heap_number, |
| 2144 Label* on_not_heap_number) { | 2086 Label* on_not_heap_number) { |
| 2145 ASSERT(on_heap_number || on_not_heap_number); | 2087 ASSERT(on_heap_number || on_not_heap_number); |
| 2146 // Tmp0() is used as a scratch register. | |
| 2147 ASSERT(!AreAliased(Tmp0(), heap_number_map)); | |
| 2148 AssertNotSmi(object); | 2088 AssertNotSmi(object); |
| 2149 | 2089 |
| 2090 UseScratchRegisterScope temps(this); |
| 2091 Register temp = temps.AcquireX(); |
| 2092 |
| 2150 // Load the HeapNumber map if it is not passed. | 2093 // Load the HeapNumber map if it is not passed. |
| 2151 if (heap_number_map.Is(NoReg)) { | 2094 if (heap_number_map.Is(NoReg)) { |
| 2152 heap_number_map = Tmp1(); | 2095 heap_number_map = temps.AcquireX(); |
| 2153 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2096 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2154 } else { | 2097 } else { |
| 2155 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map. | |
| 2156 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2098 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2157 } | 2099 } |
| 2158 | 2100 |
| 2159 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 2101 ASSERT(!AreAliased(temp, heap_number_map)); |
| 2160 Cmp(Tmp0(), heap_number_map); | 2102 |
| 2103 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2104 Cmp(temp, heap_number_map); |
| 2161 | 2105 |
| 2162 if (on_heap_number) { | 2106 if (on_heap_number) { |
| 2163 B(eq, on_heap_number); | 2107 B(eq, on_heap_number); |
| 2164 } | 2108 } |
| 2165 if (on_not_heap_number) { | 2109 if (on_not_heap_number) { |
| 2166 B(ne, on_not_heap_number); | 2110 B(ne, on_not_heap_number); |
| 2167 } | 2111 } |
| 2168 } | 2112 } |
| 2169 | 2113 |
| 2170 | 2114 |
| (...skipping 43 matching lines...) |
| 2214 // number string cache for smis is just the smi value, and the hash for | 2158 // number string cache for smis is just the smi value, and the hash for |
| 2215 // doubles is the xor of the upper and lower words. See | 2159 // doubles is the xor of the upper and lower words. See |
| 2216 // Heap::GetNumberStringCache. | 2160 // Heap::GetNumberStringCache. |
| 2217 Label is_smi; | 2161 Label is_smi; |
| 2218 Label load_result_from_cache; | 2162 Label load_result_from_cache; |
| 2219 | 2163 |
| 2220 JumpIfSmi(object, &is_smi); | 2164 JumpIfSmi(object, &is_smi); |
| 2221 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, | 2165 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, |
| 2222 DONT_DO_SMI_CHECK); | 2166 DONT_DO_SMI_CHECK); |
| 2223 | 2167 |
| 2224 STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2)); | 2168 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2)); |
| 2225 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); | 2169 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); |
| 2226 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); | 2170 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); |
| 2227 Eor(scratch1, scratch1, scratch2); | 2171 Eor(scratch1, scratch1, scratch2); |
| 2228 And(scratch1, scratch1, mask); | 2172 And(scratch1, scratch1, mask); |
| 2229 | 2173 |
| 2230 // Calculate address of entry in string cache: each entry consists of two | 2174 // Calculate address of entry in string cache: each entry consists of two |
| 2231 // pointer sized fields. | 2175 // pointer sized fields. |
| 2232 Add(scratch1, number_string_cache, | 2176 Add(scratch1, number_string_cache, |
| 2233 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 2177 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
| 2234 | 2178 |
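For the double case the index is cheap to compute: the hash is the xor of the value's two 32-bit halves (matching Heap::GetNumberStringCache), and each entry occupies two pointer-sized fields, hence the shift by kPointerSizeLog2 + 1. A sketch in plain C++, assuming mask is the usual entry-count-minus-one mask and using illustrative names:

    #include <cstdint>
    #include <cstring>

    uint64_t* NumberStringCacheEntry(uint64_t* cache_start, uint32_t mask,
                                     double value) {
      uint32_t halves[2];
      std::memcpy(halves, &value, sizeof(value));      // Ldp of two W regs
      uint32_t hash = (halves[0] ^ halves[1]) & mask;  // Eor + And
      return cache_start + hash * 2;  // two pointer-sized fields per entry
    }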
| (...skipping 41 matching lines...) |
| 2276 B(on_successful_conversion, eq); | 2220 B(on_successful_conversion, eq); |
| 2277 } | 2221 } |
| 2278 if (on_failed_conversion) { | 2222 if (on_failed_conversion) { |
| 2279 B(on_failed_conversion, ne); | 2223 B(on_failed_conversion, ne); |
| 2280 } | 2224 } |
| 2281 } | 2225 } |
| 2282 | 2226 |
| 2283 | 2227 |
| 2284 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, | 2228 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, |
| 2285 Label* on_negative_zero) { | 2229 Label* on_negative_zero) { |
| 2230 UseScratchRegisterScope temps(this); |
| 2231 Register temp = temps.AcquireX(); |
| 2286 // Floating point -0.0 is kMinInt64 as an integer, so subtracting 1 (cmp) will | 2232 // Floating point -0.0 is kMinInt64 as an integer, so subtracting 1 (cmp) will |
| 2287 // cause overflow. | 2233 // cause overflow. |
| 2288 Fmov(Tmp0(), input); | 2234 Fmov(temp, input); |
| 2289 Cmp(Tmp0(), 1); | 2235 Cmp(temp, 1); |
| 2290 B(vs, on_negative_zero); | 2236 B(vs, on_negative_zero); |
| 2291 } | 2237 } |
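Why this works, as a sketch assuming the input is a D register: the bit pattern of -0.0 is 0x8000000000000000, which is kMinInt64 when reinterpreted as an integer, so Cmp(temp, 1) computes kMinInt64 - 1 and sets the overflow flag that B(vs, ...) tests. No other double has that bit pattern.

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));  // the Fmov above
      // Only -0.0 reinterprets to 1 << 63; subtracting 1 from it (as a
      // signed 64-bit value) is the only case that overflows.
      return bits == 0x8000000000000000ULL;
    }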
| 2292 | 2238 |
| 2293 | 2239 |
| 2294 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { | 2240 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { |
| 2295 // Clamp the value to [0..255]. | 2241 // Clamp the value to [0..255]. |
| 2296 Cmp(input.W(), Operand(input.W(), UXTB)); | 2242 Cmp(input.W(), Operand(input.W(), UXTB)); |
| 2297 // If input < (input & 0xff), it must be < 0, so saturate to 0. | 2243 // If input < (input & 0xff), it must be < 0, so saturate to 0. |
| 2298 Csel(output.W(), wzr, input.W(), lt); | 2244 Csel(output.W(), wzr, input.W(), lt); |
| 2299 // Create a constant 0xff. | 2245 // If input <= (input & 0xff), it must be <= 255. Otherwise, saturate to 255. |
| 2300 Mov(WTmp0(), 255); | 2246 Csel(output.W(), output.W(), 255, le); |
| 2301 // If input > input & 0xff, it must be > 255, so saturate to 255. | |
| 2302 Csel(output.W(), WTmp0(), output.W(), gt); | |
| 2303 } | 2247 } |
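The rewrite drops the explicit Mov of 255 by letting the macro Csel take an immediate, but the underlying trick is unchanged: a single compare of the input against its own low byte classifies it as negative, in range, or above 255. A plain C++ rendering of the two conditional selects (illustrative, not V8 API):

    #include <cstdint>

    uint8_t ClampInt32ToUint8(int32_t input) {
      int32_t low_byte = input & 0xff;                  // Operand(..., UXTB)
      int32_t result = (input < low_byte) ? 0 : input;  // lt <=> input < 0
      result = (input <= low_byte) ? result : 255;      // gt <=> input > 255
      return static_cast<uint8_t>(result);
    }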
| 2304 | 2248 |
| 2305 | 2249 |
| 2306 void MacroAssembler::ClampInt32ToUint8(Register in_out) { | 2250 void MacroAssembler::ClampInt32ToUint8(Register in_out) { |
| 2307 ClampInt32ToUint8(in_out, in_out); | 2251 ClampInt32ToUint8(in_out, in_out); |
| 2308 } | 2252 } |
| 2309 | 2253 |
| 2310 | 2254 |
| 2311 void MacroAssembler::ClampDoubleToUint8(Register output, | 2255 void MacroAssembler::ClampDoubleToUint8(Register output, |
| 2312 DoubleRegister input, | 2256 DoubleRegister input, |
| (...skipping 13 matching lines...) |
| 2326 // Values greater than 255 have already been clamped to 255. | 2270 // Values greater than 255 have already been clamped to 255. |
| 2327 Fcvtnu(output, dbl_scratch); | 2271 Fcvtnu(output, dbl_scratch); |
| 2328 } | 2272 } |
| 2329 | 2273 |
| 2330 | 2274 |
| 2331 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, | 2275 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, |
| 2332 Register src, | 2276 Register src, |
| 2333 unsigned count, | 2277 unsigned count, |
| 2334 Register scratch1, | 2278 Register scratch1, |
| 2335 Register scratch2, | 2279 Register scratch2, |
| 2336 Register scratch3) { | 2280 Register scratch3, |
| 2281 Register scratch4, |
| 2282 Register scratch5) { |
| 2337 // Untag src and dst into scratch registers. | 2283 // Untag src and dst into scratch registers. |
| 2338 // Copy src->dst in a tight loop. | 2284 // Copy src->dst in a tight loop. |
| 2339 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1())); | 2285 ASSERT(!AreAliased(dst, src, |
| 2286 scratch1, scratch2, scratch3, scratch4, scratch5)); |
| 2340 ASSERT(count >= 2); | 2287 ASSERT(count >= 2); |
| 2341 | 2288 |
| 2342 const Register& remaining = scratch3; | 2289 const Register& remaining = scratch3; |
| 2343 Mov(remaining, count / 2); | 2290 Mov(remaining, count / 2); |
| 2344 | 2291 |
| 2345 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2346 InstructionAccurateScope scope(this); | |
| 2347 | |
| 2348 const Register& dst_untagged = scratch1; | 2292 const Register& dst_untagged = scratch1; |
| 2349 const Register& src_untagged = scratch2; | 2293 const Register& src_untagged = scratch2; |
| 2350 sub(dst_untagged, dst, kHeapObjectTag); | 2294 Sub(dst_untagged, dst, kHeapObjectTag); |
| 2351 sub(src_untagged, src, kHeapObjectTag); | 2295 Sub(src_untagged, src, kHeapObjectTag); |
| 2352 | 2296 |
| 2353 // Copy fields in pairs. | 2297 // Copy fields in pairs. |
| 2354 Label loop; | 2298 Label loop; |
| 2355 bind(&loop); | 2299 Bind(&loop); |
| 2356 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2300 Ldp(scratch4, scratch5, |
| 2357 PostIndex)); | 2301 MemOperand(src_untagged, kXRegSize * 2, PostIndex)); |
| 2358 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | 2302 Stp(scratch4, scratch5, |
| 2359 PostIndex)); | 2303 MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); |
| 2360 sub(remaining, remaining, 1); | 2304 Sub(remaining, remaining, 1); |
| 2361 cbnz(remaining, &loop); | 2305 Cbnz(remaining, &loop); |
| 2362 | 2306 |
| 2363 // Handle the leftovers. | 2307 // Handle the leftovers. |
| 2364 if (count & 1) { | 2308 if (count & 1) { |
| 2365 ldr(Tmp0(), MemOperand(src_untagged)); | 2309 Ldr(scratch4, MemOperand(src_untagged)); |
| 2366 str(Tmp0(), MemOperand(dst_untagged)); | 2310 Str(scratch4, MemOperand(dst_untagged)); |
| 2367 } | 2311 } |
| 2368 } | 2312 } |
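With the fixed temporaries gone, the loop body now uses macro Ldp/Stp on caller-provided scratches, but the copy strategy is unchanged: move two fields per iteration, then mop up an odd count with a single load/store. A minimal C++ model, assuming 8-byte fields:

    #include <cstdint>

    void CopyFieldsLoopPairs(uint64_t* dst, const uint64_t* src,
                             unsigned count) {
      for (unsigned remaining = count / 2; remaining > 0; remaining--) {
        dst[0] = src[0];  // Ldp/Stp, post-indexed by kXRegSize * 2
        dst[1] = src[1];
        dst += 2;
        src += 2;
      }
      if (count & 1) {    // handle the leftovers
        *dst = *src;
      }
    }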
| 2369 | 2313 |
| 2370 | 2314 |
| 2371 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | 2315 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, |
| 2372 Register src, | 2316 Register src, |
| 2373 unsigned count, | 2317 unsigned count, |
| 2374 Register scratch1, | 2318 Register scratch1, |
| 2375 Register scratch2) { | 2319 Register scratch2, |
| 2320 Register scratch3, |
| 2321 Register scratch4) { |
| 2376 // Untag src and dst into scratch registers. | 2322 // Untag src and dst into scratch registers. |
| 2377 // Copy src->dst in an unrolled loop. | 2323 // Copy src->dst in an unrolled loop. |
| 2378 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1())); | 2324 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); |
| 2379 | |
| 2380 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2381 InstructionAccurateScope scope(this); | |
| 2382 | 2325 |
| 2383 const Register& dst_untagged = scratch1; | 2326 const Register& dst_untagged = scratch1; |
| 2384 const Register& src_untagged = scratch2; | 2327 const Register& src_untagged = scratch2; |
| 2385 sub(dst_untagged, dst, kHeapObjectTag); | 2328 Sub(dst_untagged, dst, kHeapObjectTag); |
| 2386 sub(src_untagged, src, kHeapObjectTag); | 2329 Sub(src_untagged, src, kHeapObjectTag); |
| 2387 | 2330 |
| 2388 // Copy fields in pairs. | 2331 // Copy fields in pairs. |
| 2389 for (unsigned i = 0; i < count / 2; i++) { | 2332 for (unsigned i = 0; i < count / 2; i++) { |
| 2390 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2333 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex)); |
| 2391 PostIndex)); | 2334 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); |
| 2392 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | |
| 2393 PostIndex)); | |
| 2394 } | 2335 } |
| 2395 | 2336 |
| 2396 // Handle the leftovers. | 2337 // Handle the leftovers. |
| 2397 if (count & 1) { | 2338 if (count & 1) { |
| 2398 ldr(Tmp0(), MemOperand(src_untagged)); | 2339 Ldr(scratch3, MemOperand(src_untagged)); |
| 2399 str(Tmp0(), MemOperand(dst_untagged)); | 2340 Str(scratch3, MemOperand(dst_untagged)); |
| 2400 } | 2341 } |
| 2401 } | 2342 } |
| 2402 | 2343 |
| 2403 | 2344 |
| 2404 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | 2345 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, |
| 2405 Register src, | 2346 Register src, |
| 2406 unsigned count, | 2347 unsigned count, |
| 2407 Register scratch1) { | 2348 Register scratch1, |
| 2349 Register scratch2, |
| 2350 Register scratch3) { |
| 2408 // Untag src and dst into scratch registers. | 2351 // Untag src and dst into scratch registers. |
| 2409 // Copy src->dst in an unrolled loop. | 2352 // Copy src->dst in an unrolled loop. |
| 2410 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1())); | 2353 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); |
| 2411 | |
| 2412 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2413 InstructionAccurateScope scope(this); | |
| 2414 | 2354 |
| 2415 const Register& dst_untagged = scratch1; | 2355 const Register& dst_untagged = scratch1; |
| 2416 const Register& src_untagged = Tmp1(); | 2356 const Register& src_untagged = scratch2; |
| 2417 sub(dst_untagged, dst, kHeapObjectTag); | 2357 Sub(dst_untagged, dst, kHeapObjectTag); |
| 2418 sub(src_untagged, src, kHeapObjectTag); | 2358 Sub(src_untagged, src, kHeapObjectTag); |
| 2419 | 2359 |
| 2420 // Copy fields one by one. | 2360 // Copy fields one by one. |
| 2421 for (unsigned i = 0; i < count; i++) { | 2361 for (unsigned i = 0; i < count; i++) { |
| 2422 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); | 2362 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex)); |
| 2423 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); | 2363 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex)); |
| 2424 } | 2364 } |
| 2425 } | 2365 } |
| 2426 | 2366 |
| 2427 | 2367 |
| 2428 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | 2368 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, |
| 2429 unsigned count) { | 2369 unsigned count) { |
| 2430 // One of two methods is used: | 2370 // One of two methods is used: |
| 2431 // | 2371 // |
| 2432 // For high 'count' values where many scratch registers are available: | 2372 // For high 'count' values where many scratch registers are available: |
| 2433 // Untag src and dst into scratch registers. | 2373 // Untag src and dst into scratch registers. |
| 2434 // Copy src->dst in a tight loop. | 2374 // Copy src->dst in a tight loop. |
| 2435 // | 2375 // |
| 2436 // For low 'count' values or where few scratch registers are available: | 2376 // For low 'count' values or where few scratch registers are available: |
| 2437 // Untag src and dst into scratch registers. | 2377 // Untag src and dst into scratch registers. |
| 2438 // Copy src->dst in an unrolled loop. | 2378 // Copy src->dst in an unrolled loop. |
| 2439 // | 2379 // |
| 2440 // In both cases, fields are copied in pairs if possible, and left-overs are | 2380 // In both cases, fields are copied in pairs if possible, and left-overs are |
| 2441 // handled separately. | 2381 // handled separately. |
| 2382 ASSERT(!AreAliased(dst, src)); |
| 2442 ASSERT(!temps.IncludesAliasOf(dst)); | 2383 ASSERT(!temps.IncludesAliasOf(dst)); |
| 2443 ASSERT(!temps.IncludesAliasOf(src)); | 2384 ASSERT(!temps.IncludesAliasOf(src)); |
| 2444 ASSERT(!temps.IncludesAliasOf(Tmp0())); | |
| 2445 ASSERT(!temps.IncludesAliasOf(Tmp1())); | |
| 2446 ASSERT(!temps.IncludesAliasOf(xzr)); | 2385 ASSERT(!temps.IncludesAliasOf(xzr)); |
| 2447 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1())); | |
| 2448 | 2386 |
| 2449 if (emit_debug_code()) { | 2387 if (emit_debug_code()) { |
| 2450 Cmp(dst, src); | 2388 Cmp(dst, src); |
| 2451 Check(ne, kTheSourceAndDestinationAreTheSame); | 2389 Check(ne, kTheSourceAndDestinationAreTheSame); |
| 2452 } | 2390 } |
| 2453 | 2391 |
| 2454 // The value of 'count' at which a loop will be generated (if there are | 2392 // The value of 'count' at which a loop will be generated (if there are |
| 2455 // enough scratch registers). | 2393 // enough scratch registers). |
| 2456 static const unsigned kLoopThreshold = 8; | 2394 static const unsigned kLoopThreshold = 8; |
| 2457 | 2395 |
| 2458 ASSERT(!temps.IsEmpty()); | 2396 UseScratchRegisterScope masm_temps(this); |
| 2459 Register scratch1 = Register(temps.PopLowestIndex()); | 2397 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) { |
| 2460 Register scratch2 = Register(temps.PopLowestIndex()); | 2398 CopyFieldsLoopPairsHelper(dst, src, count, |
| 2461 Register scratch3 = Register(temps.PopLowestIndex()); | 2399 Register(temps.PopLowestIndex()), |
| 2462 | 2400 Register(temps.PopLowestIndex()), |
| 2463 if (scratch3.IsValid() && (count >= kLoopThreshold)) { | 2401 Register(temps.PopLowestIndex()), |
| 2464 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3); | 2402 masm_temps.AcquireX(), |
| 2465 } else if (scratch2.IsValid()) { | 2403 masm_temps.AcquireX()); |
| 2466 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2); | 2404 } else if (temps.Count() >= 2) { |
| 2467 } else if (scratch1.IsValid()) { | 2405 CopyFieldsUnrolledPairsHelper(dst, src, count, |
| 2468 CopyFieldsUnrolledHelper(dst, src, count, scratch1); | 2406 Register(temps.PopLowestIndex()), |
| 2407 Register(temps.PopLowestIndex()), |
| 2408 masm_temps.AcquireX(), |
| 2409 masm_temps.AcquireX()); |
| 2410 } else if (temps.Count() == 1) { |
| 2411 CopyFieldsUnrolledHelper(dst, src, count, |
| 2412 Register(temps.PopLowestIndex()), |
| 2413 masm_temps.AcquireX(), |
| 2414 masm_temps.AcquireX()); |
| 2469 } else { | 2415 } else { |
| 2470 UNREACHABLE(); | 2416 UNREACHABLE(); |
| 2471 } | 2417 } |
| 2472 } | 2418 } |
| 2473 | 2419 |
| 2474 | 2420 |
| 2475 void MacroAssembler::CopyBytes(Register dst, | 2421 void MacroAssembler::CopyBytes(Register dst, |
| 2476 Register src, | 2422 Register src, |
| 2477 Register length, | 2423 Register length, |
| 2478 Register scratch, | 2424 Register scratch, |
| (...skipping 20 matching lines...) |
| 2499 | 2445 |
| 2500 Bind(&loop); | 2446 Bind(&loop); |
| 2501 Sub(length, length, 1); | 2447 Sub(length, length, 1); |
| 2502 Ldrb(scratch, MemOperand(src, 1, PostIndex)); | 2448 Ldrb(scratch, MemOperand(src, 1, PostIndex)); |
| 2503 Strb(scratch, MemOperand(dst, 1, PostIndex)); | 2449 Strb(scratch, MemOperand(dst, 1, PostIndex)); |
| 2504 Cbnz(length, &loop); | 2450 Cbnz(length, &loop); |
| 2505 Bind(&done); | 2451 Bind(&done); |
| 2506 } | 2452 } |
| 2507 | 2453 |
| 2508 | 2454 |
| 2509 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | 2455 void MacroAssembler::FillFields(Register dst, |
| 2510 Register end_offset, | 2456 Register field_count, |
| 2511 Register filler) { | 2457 Register filler) { |
| 2512 Label loop, entry; | 2458 ASSERT(!dst.Is(csp)); |
| 2459 UseScratchRegisterScope temps(this); |
| 2460 Register field_ptr = temps.AcquireX(); |
| 2461 Register counter = temps.AcquireX(); |
| 2462 Label done; |
| 2463 |
| 2464 // Decrement count. If the result is negative, count was zero, and there's |
| 2465 // nothing to do. If count was one, flags are set to fail the gt condition |
| 2466 // at the end of the pairs loop. |
| 2467 Subs(counter, field_count, 1); |
| 2468 B(lt, &done); |
| 2469 |
| 2470 // There's at least one field to fill, so do this unconditionally. |
| 2471 Str(filler, MemOperand(dst, kPointerSize, PostIndex)); |
| 2472 |
| 2473 // If the bottom bit of counter is set, there are an even number of fields to |
| 2474 // fill, so pull the start pointer back by one field, allowing the pairs loop |
| 2475 // to overwrite the field that was stored above. |
| 2476 And(field_ptr, counter, 1); |
| 2477 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2)); |
| 2478 |
| 2479 // Store filler to memory in pairs. |
| 2480 Label entry, loop; |
| 2513 B(&entry); | 2481 B(&entry); |
| 2514 Bind(&loop); | 2482 Bind(&loop); |
| 2515 // TODO(all): consider using stp here. | 2483 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex)); |
| 2516 Str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); | 2484 Subs(counter, counter, 2); |
| 2517 Bind(&entry); | 2485 Bind(&entry); |
| 2518 Cmp(start_offset, end_offset); | 2486 B(gt, &loop); |
| 2519 B(lt, &loop); | 2487 |
| 2488 Bind(&done); |
| 2520 } | 2489 } |
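The new FillFields earns its Stp by storing one filler word up front and then, when the total count is even (equivalently, when the decremented counter has its bottom bit set), pulling the write pointer back one slot so the pair loop harmlessly rewrites that word. A C++ sketch of the same control flow, assuming one field per 8-byte word:

    #include <cstddef>
    #include <cstdint>

    void FillFields(uint64_t* dst, size_t field_count, uint64_t filler) {
      if (field_count == 0) return;  // Subs(counter, field_count, 1); B(lt)
      int64_t counter = static_cast<int64_t>(field_count) - 1;
      *dst++ = filler;               // unconditional first store
      // Back up one slot when the total is even, so every Stp lines up
      // with the remaining work.
      uint64_t* field_ptr = dst - (counter & 1);
      while (counter > 0) {          // B(gt, &loop)
        field_ptr[0] = filler;       // Stp(filler, filler, ...)
        field_ptr[1] = filler;
        field_ptr += 2;
        counter -= 2;                // Subs(counter, counter, 2)
      }
    }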
| 2521 | 2490 |
| 2522 | 2491 |
| 2523 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( | 2492 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( |
| 2524 Register first, | 2493 Register first, |
| 2525 Register second, | 2494 Register second, |
| 2526 Register scratch1, | 2495 Register scratch1, |
| 2527 Register scratch2, | 2496 Register scratch2, |
| 2528 Label* failure, | 2497 Label* failure, |
| 2529 SmiCheckType smi_check) { | 2498 SmiCheckType smi_check) { |
| (...skipping 318 matching lines...) |
| 2848 result, | 2817 result, |
| 2849 0, | 2818 0, |
| 2850 true, // is_truncating | 2819 true, // is_truncating |
| 2851 true); // skip_fastpath | 2820 true); // skip_fastpath |
| 2852 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | 2821 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber |
| 2853 | 2822 |
| 2854 Drop(1, kDoubleSize); // Drop the double input on the stack. | 2823 Drop(1, kDoubleSize); // Drop the double input on the stack. |
| 2855 Pop(lr); | 2824 Pop(lr); |
| 2856 | 2825 |
| 2857 Bind(&done); | 2826 Bind(&done); |
| 2858 | |
| 2859 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
| 2860 // https://code.google.com/p/v8/issues/detail?id=3149 | |
| 2861 Sxtw(result, result.W()); | |
| 2862 } | 2827 } |
| 2863 | 2828 |
| 2864 | 2829 |
| 2865 void MacroAssembler::TruncateHeapNumberToI(Register result, | 2830 void MacroAssembler::TruncateHeapNumberToI(Register result, |
| 2866 Register object) { | 2831 Register object) { |
| 2867 Label done; | 2832 Label done; |
| 2868 ASSERT(!result.is(object)); | 2833 ASSERT(!result.is(object)); |
| 2869 ASSERT(jssp.Is(StackPointer())); | 2834 ASSERT(jssp.Is(StackPointer())); |
| 2870 | 2835 |
| 2871 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | 2836 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 2872 | 2837 |
| 2873 // Try to convert the double to an int64. If successful, the bottom 32 bits | 2838 // Try to convert the double to an int64. If successful, the bottom 32 bits |
| 2874 // contain our truncated int32 result. | 2839 // contain our truncated int32 result. |
| 2875 TryConvertDoubleToInt64(result, fp_scratch, &done); | 2840 TryConvertDoubleToInt64(result, fp_scratch, &done); |
| 2876 | 2841 |
| 2877 // If we fell through then inline version didn't succeed - call stub instead. | 2842 // If we fell through then inline version didn't succeed - call stub instead. |
| 2878 Push(lr); | 2843 Push(lr); |
| 2879 DoubleToIStub stub(object, | 2844 DoubleToIStub stub(object, |
| 2880 result, | 2845 result, |
| 2881 HeapNumber::kValueOffset - kHeapObjectTag, | 2846 HeapNumber::kValueOffset - kHeapObjectTag, |
| 2882 true, // is_truncating | 2847 true, // is_truncating |
| 2883 true); // skip_fastpath | 2848 true); // skip_fastpath |
| 2884 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | 2849 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber |
| 2885 Pop(lr); | 2850 Pop(lr); |
| 2886 | 2851 |
| 2887 Bind(&done); | 2852 Bind(&done); |
| 2888 | |
| 2889 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
| 2890 // https://code.google.com/p/v8/issues/detail?id=3149 | |
| 2891 Sxtw(result, result.W()); | |
| 2892 } | 2853 } |
| 2893 | 2854 |
| 2894 | 2855 |
| 2895 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | 2856 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { |
| 2896 if (frame_mode == BUILD_STUB_FRAME) { | 2857 if (frame_mode == BUILD_STUB_FRAME) { |
| 2897 ASSERT(StackPointer().Is(jssp)); | 2858 ASSERT(StackPointer().Is(jssp)); |
| 2898 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already | 2859 UseScratchRegisterScope temps(this); |
| 2899 // have the special STUB smi? | 2860 Register temp = temps.AcquireX(); |
| 2900 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); | 2861 __ Mov(temp, Operand(Smi::FromInt(StackFrame::STUB))); |
| 2901 // Compiled stubs don't age, and so they don't need the predictable code | 2862 // Compiled stubs don't age, and so they don't need the predictable code |
| 2902 // ageing sequence. | 2863 // ageing sequence. |
| 2903 __ Push(lr, fp, cp, Tmp0()); | 2864 __ Push(lr, fp, cp, temp); |
| 2904 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 2865 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
| 2905 } else { | 2866 } else { |
| 2906 if (isolate()->IsCodePreAgingActive()) { | 2867 if (isolate()->IsCodePreAgingActive()) { |
| 2907 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 2868 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
| 2908 __ EmitCodeAgeSequence(stub); | 2869 __ EmitCodeAgeSequence(stub); |
| 2909 } else { | 2870 } else { |
| 2910 __ EmitFrameSetupForCodeAgePatching(); | 2871 __ EmitFrameSetupForCodeAgePatching(); |
| 2911 } | 2872 } |
| 2912 } | 2873 } |
| 2913 } | 2874 } |
| 2914 | 2875 |
| 2915 | 2876 |
| 2916 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 2877 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 2917 ASSERT(jssp.Is(StackPointer())); | 2878 ASSERT(jssp.Is(StackPointer())); |
| 2879 UseScratchRegisterScope temps(this); |
| 2880 Register type_reg = temps.AcquireX(); |
| 2881 Register code_reg = temps.AcquireX(); |
| 2882 |
| 2918 Push(lr, fp, cp); | 2883 Push(lr, fp, cp); |
| 2919 Mov(Tmp1(), Operand(Smi::FromInt(type))); | 2884 Mov(type_reg, Operand(Smi::FromInt(type))); |
| 2920 Mov(Tmp0(), Operand(CodeObject())); | 2885 Mov(code_reg, Operand(CodeObject())); |
| 2921 Push(Tmp1(), Tmp0()); | 2886 Push(type_reg, code_reg); |
| 2922 // jssp[4] : lr | 2887 // jssp[4] : lr |
| 2923 // jssp[3] : fp | 2888 // jssp[3] : fp |
| 2924 // jssp[2] : cp | 2889 // jssp[2] : cp |
| 2925 // jssp[1] : type | 2890 // jssp[1] : type |
| 2926 // jssp[0] : code object | 2891 // jssp[0] : code object |
| 2927 | 2892 |
| 2928 // Adjust FP to point to saved FP. | 2893 // Adjust FP to point to saved FP. |
| 2929 add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); | 2894 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); |
| 2930 } | 2895 } |
| 2931 | 2896 |
| 2932 | 2897 |
| 2933 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 2898 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 2934 ASSERT(jssp.Is(StackPointer())); | 2899 ASSERT(jssp.Is(StackPointer())); |
| 2935 // Drop the execution stack down to the frame pointer and restore | 2900 // Drop the execution stack down to the frame pointer and restore |
| 2936 // the caller frame pointer and return address. | 2901 // the caller frame pointer and return address. |
| 2937 Mov(jssp, fp); | 2902 Mov(jssp, fp); |
| 2938 AssertStackConsistency(); | 2903 AssertStackConsistency(); |
| 2939 Pop(fp, lr); | 2904 Pop(fp, lr); |
| 2940 } | 2905 } |
| 2941 | 2906 |
| 2942 | 2907 |
| 2943 void MacroAssembler::ExitFramePreserveFPRegs() { | 2908 void MacroAssembler::ExitFramePreserveFPRegs() { |
| 2944 PushCPURegList(kCallerSavedFP); | 2909 PushCPURegList(kCallerSavedFP); |
| 2945 } | 2910 } |
| 2946 | 2911 |
| 2947 | 2912 |
| 2948 void MacroAssembler::ExitFrameRestoreFPRegs() { | 2913 void MacroAssembler::ExitFrameRestoreFPRegs() { |
| 2949 // Read the registers from the stack without popping them. The stack pointer | 2914 // Read the registers from the stack without popping them. The stack pointer |
| 2950 // will be reset as part of the unwinding process. | 2915 // will be reset as part of the unwinding process. |
| 2951 CPURegList saved_fp_regs = kCallerSavedFP; | 2916 CPURegList saved_fp_regs = kCallerSavedFP; |
| 2952 ASSERT(saved_fp_regs.Count() % 2 == 0); | 2917 ASSERT(saved_fp_regs.Count() % 2 == 0); |
| 2953 | 2918 |
| 2954 int offset = ExitFrameConstants::kLastExitFrameField; | 2919 int offset = ExitFrameConstants::kLastExitFrameField; |
| 2955 while (!saved_fp_regs.IsEmpty()) { | 2920 while (!saved_fp_regs.IsEmpty()) { |
| 2956 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); | 2921 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); |
| 2957 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); | 2922 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); |
| 2958 offset -= 2 * kDRegSizeInBytes; | 2923 offset -= 2 * kDRegSize; |
| 2959 Ldp(dst1, dst0, MemOperand(fp, offset)); | 2924 Ldp(dst1, dst0, MemOperand(fp, offset)); |
| 2960 } | 2925 } |
| 2961 } | 2926 } |
| 2962 | 2927 |
| 2963 | 2928 |
| 2964 // TODO(jbramley): Check that we're handling the frame pointer correctly. | 2929 // TODO(jbramley): Check that we're handling the frame pointer correctly. |
| 2965 void MacroAssembler::EnterExitFrame(bool save_doubles, | 2930 void MacroAssembler::EnterExitFrame(bool save_doubles, |
| 2966 const Register& scratch, | 2931 const Register& scratch, |
| 2967 int extra_space) { | 2932 int extra_space) { |
| 2968 ASSERT(jssp.Is(StackPointer())); | 2933 ASSERT(jssp.Is(StackPointer())); |
| (...skipping 24 matching lines...) |
| 2993 | 2958 |
| 2994 STATIC_ASSERT((-2 * kPointerSize) == | 2959 STATIC_ASSERT((-2 * kPointerSize) == |
| 2995 ExitFrameConstants::kLastExitFrameField); | 2960 ExitFrameConstants::kLastExitFrameField); |
| 2996 if (save_doubles) { | 2961 if (save_doubles) { |
| 2997 ExitFramePreserveFPRegs(); | 2962 ExitFramePreserveFPRegs(); |
| 2998 } | 2963 } |
| 2999 | 2964 |
| 3000 // Reserve space for the return address and for user requested memory. | 2965 // Reserve space for the return address and for user requested memory. |
| 3001 // We do this before aligning to make sure that we end up correctly | 2966 // We do this before aligning to make sure that we end up correctly |
| 3002 // aligned with the minimum of wasted space. | 2967 // aligned with the minimum of wasted space. |
| 3003 Claim(extra_space + 1, kXRegSizeInBytes); | 2968 Claim(extra_space + 1, kXRegSize); |
| 3004 // fp[8]: CallerPC (lr) | 2969 // fp[8]: CallerPC (lr) |
| 3005 // fp -> fp[0]: CallerFP (old fp) | 2970 // fp -> fp[0]: CallerFP (old fp) |
| 3006 // fp[-8]: Space reserved for SPOffset. | 2971 // fp[-8]: Space reserved for SPOffset. |
| 3007 // fp[-16]: CodeObject() | 2972 // fp[-16]: CodeObject() |
| 3008 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true). | 2973 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true). |
| 3009 // jssp[8]: Extra space reserved for caller (if extra_space != 0). | 2974 // jssp[8]: Extra space reserved for caller (if extra_space != 0). |
| 3010 // jssp -> jssp[0]: Space reserved for the return address. | 2975 // jssp -> jssp[0]: Space reserved for the return address. |
| 3011 | 2976 |
| 3012 // Align and synchronize the system stack pointer with jssp. | 2977 // Align and synchronize the system stack pointer with jssp. |
| 3013 AlignAndSetCSPForFrame(); | 2978 AlignAndSetCSPForFrame(); |
| 3014 ASSERT(csp.Is(StackPointer())); | 2979 ASSERT(csp.Is(StackPointer())); |
| 3015 | 2980 |
| 3016 // fp[8]: CallerPC (lr) | 2981 // fp[8]: CallerPC (lr) |
| 3017 // fp -> fp[0]: CallerFP (old fp) | 2982 // fp -> fp[0]: CallerFP (old fp) |
| 3018 // fp[-8]: Space reserved for SPOffset. | 2983 // fp[-8]: Space reserved for SPOffset. |
| 3019 // fp[-16]: CodeObject() | 2984 // fp[-16]: CodeObject() |
| 3020 // csp[...]: Saved doubles, if saved_doubles is true. | 2985 // csp[...]: Saved doubles, if saved_doubles is true. |
| 3021 // csp[8]: Memory reserved for the caller if extra_space != 0. | 2986 // csp[8]: Memory reserved for the caller if extra_space != 0. |
| 3022 // Alignment padding, if necessary. | 2987 // Alignment padding, if necessary. |
| 3023 // csp -> csp[0]: Space reserved for the return address. | 2988 // csp -> csp[0]: Space reserved for the return address. |
| 3024 | 2989 |
| 3025 // ExitFrame::GetStateForFramePointer expects to find the return address at | 2990 // ExitFrame::GetStateForFramePointer expects to find the return address at |
| 3026 // the memory address immediately below the pointer stored in SPOffset. | 2991 // the memory address immediately below the pointer stored in SPOffset. |
| 3027 // It is not safe to derive much else from SPOffset, because the size of the | 2992 // It is not safe to derive much else from SPOffset, because the size of the |
| 3028 // padding can vary. | 2993 // padding can vary. |
| 3029 Add(scratch, csp, kXRegSizeInBytes); | 2994 Add(scratch, csp, kXRegSize); |
| 3030 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 2995 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 3031 } | 2996 } |
| 3032 | 2997 |
| 3033 | 2998 |
| 3034 // Leave the current exit frame. | 2999 // Leave the current exit frame. |
| 3035 void MacroAssembler::LeaveExitFrame(bool restore_doubles, | 3000 void MacroAssembler::LeaveExitFrame(bool restore_doubles, |
| 3036 const Register& scratch, | 3001 const Register& scratch, |
| 3037 bool restore_context) { | 3002 bool restore_context) { |
| 3038 ASSERT(csp.Is(StackPointer())); | 3003 ASSERT(csp.Is(StackPointer())); |
| 3039 | 3004 |
| (...skipping 121 matching lines...) |
| 3161 Push(x10); | 3126 Push(x10); |
| 3162 // Set this new handler as the current one. | 3127 // Set this new handler as the current one. |
| 3163 Str(jssp, MemOperand(x11)); | 3128 Str(jssp, MemOperand(x11)); |
| 3164 } | 3129 } |
| 3165 | 3130 |
| 3166 | 3131 |
| 3167 void MacroAssembler::PopTryHandler() { | 3132 void MacroAssembler::PopTryHandler() { |
| 3168 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 3133 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 3169 Pop(x10); | 3134 Pop(x10); |
| 3170 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 3135 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 3171 Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes); | 3136 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); |
| 3172 Str(x10, MemOperand(x11)); | 3137 Str(x10, MemOperand(x11)); |
| 3173 } | 3138 } |
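PopTryHandler leans on the STATIC_ASSERT above it: the next-handler link is the first word of the handler frame, so popping that word and dropping the rest unlinks the handler. A C++ sketch of the unlink; the frame size constant is assumed for illustration (the real value is StackHandlerConstants::kSize):

    #include <cstdint>

    const int kHandlerFrameWords = 5;  // assumption, not the real constant

    void PopTryHandler(uintptr_t** jssp, uintptr_t* handler_address) {
      uintptr_t next_handler = **jssp;  // Pop(x10): kNextOffset == 0
      *jssp += kHandlerFrameWords;      // the Pop plus the Drop
      *handler_address = next_handler;  // Str(x10, MemOperand(x11))
    }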
| 3174 | 3139 |
| 3175 | 3140 |
| 3176 void MacroAssembler::Allocate(int object_size, | 3141 void MacroAssembler::Allocate(int object_size, |
| 3177 Register result, | 3142 Register result, |
| 3178 Register scratch1, | 3143 Register scratch1, |
| 3179 Register scratch2, | 3144 Register scratch2, |
| 3180 Label* gc_required, | 3145 Label* gc_required, |
| 3181 AllocationFlags flags) { | 3146 AllocationFlags flags) { |
| 3182 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 3147 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
| 3183 if (!FLAG_inline_new) { | 3148 if (!FLAG_inline_new) { |
| 3184 if (emit_debug_code()) { | 3149 if (emit_debug_code()) { |
| 3185 // Trash the registers to simulate an allocation failure. | 3150 // Trash the registers to simulate an allocation failure. |
| 3186 // We apply salt to the original zap value to easily spot the values. | 3151 // We apply salt to the original zap value to easily spot the values. |
| 3187 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3152 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 3188 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3153 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3189 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3154 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3190 } | 3155 } |
| 3191 B(gc_required); | 3156 B(gc_required); |
| 3192 return; | 3157 return; |
| 3193 } | 3158 } |
| 3194 | 3159 |
| 3195 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1())); | 3160 UseScratchRegisterScope temps(this); |
| 3196 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() && | 3161 Register scratch3 = temps.AcquireX(); |
| 3197 Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3162 |
| 3163 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); |
| 3164 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); |
| 3198 | 3165 |
| 3199 // Make object size into bytes. | 3166 // Make object size into bytes. |
| 3200 if ((flags & SIZE_IN_WORDS) != 0) { | 3167 if ((flags & SIZE_IN_WORDS) != 0) { |
| 3201 object_size *= kPointerSize; | 3168 object_size *= kPointerSize; |
| 3202 } | 3169 } |
| 3203 ASSERT(0 == (object_size & kObjectAlignmentMask)); | 3170 ASSERT(0 == (object_size & kObjectAlignmentMask)); |
| 3204 | 3171 |
| 3205 // Check relative positions of allocation top and limit addresses. | 3172 // Check relative positions of allocation top and limit addresses. |
| 3206 // The values must be adjacent in memory to allow the use of LDP. | 3173 // The values must be adjacent in memory to allow the use of LDP. |
| 3207 ExternalReference heap_allocation_top = | 3174 ExternalReference heap_allocation_top = |
| 3208 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3175 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 3209 ExternalReference heap_allocation_limit = | 3176 ExternalReference heap_allocation_limit = |
| 3210 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3177 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 3211 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3178 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 3212 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3179 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 3213 ASSERT((limit - top) == kPointerSize); | 3180 ASSERT((limit - top) == kPointerSize); |
| 3214 | 3181 |
| 3215 // Set up allocation top address and object size registers. | 3182 // Set up allocation top address and object size registers. |
| 3216 Register top_address = scratch1; | 3183 Register top_address = scratch1; |
| 3217 Register allocation_limit = scratch2; | 3184 Register allocation_limit = scratch2; |
| 3218 Mov(top_address, Operand(heap_allocation_top)); | 3185 Mov(top_address, Operand(heap_allocation_top)); |
| 3219 | 3186 |
| 3220 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3187 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 3221 // Load allocation top into result and the allocation limit. | 3188 // Load allocation top into result and the allocation limit. |
| 3222 Ldp(result, allocation_limit, MemOperand(top_address)); | 3189 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 3223 } else { | 3190 } else { |
| 3224 if (emit_debug_code()) { | 3191 if (emit_debug_code()) { |
| 3225 // Assert that result actually contains top on entry. | 3192 // Assert that result actually contains top on entry. |
| 3226 Ldr(Tmp0(), MemOperand(top_address)); | 3193 Ldr(scratch3, MemOperand(top_address)); |
| 3227 Cmp(result, Tmp0()); | 3194 Cmp(result, scratch3); |
| 3228 Check(eq, kUnexpectedAllocationTop); | 3195 Check(eq, kUnexpectedAllocationTop); |
| 3229 } | 3196 } |
| 3230 // Load the allocation limit. 'result' already contains the allocation top. | 3197 // Load the allocation limit. 'result' already contains the allocation top. |
| 3231 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3198 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 3232 } | 3199 } |
| 3233 | 3200 |
| 3234 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3201 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 3235 // the same alignment on A64. | 3202 // the same alignment on A64. |
| 3236 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3203 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 3237 | 3204 |
| 3238 // Calculate new top and bail out if new space is exhausted. | 3205 // Calculate new top and bail out if new space is exhausted. |
| 3239 Adds(Tmp1(), result, object_size); | 3206 Adds(scratch3, result, object_size); |
| 3240 B(vs, gc_required); | 3207 B(vs, gc_required); |
| 3241 Cmp(Tmp1(), allocation_limit); | 3208 Cmp(scratch3, allocation_limit); |
| 3242 B(hi, gc_required); | 3209 B(hi, gc_required); |
| 3243 Str(Tmp1(), MemOperand(top_address)); | 3210 Str(scratch3, MemOperand(top_address)); |
| 3244 | 3211 |
| 3245 // Tag the object if requested. | 3212 // Tag the object if requested. |
| 3246 if ((flags & TAG_OBJECT) != 0) { | 3213 if ((flags & TAG_OBJECT) != 0) { |
| 3247 Orr(result, result, kHeapObjectTag); | 3214 Orr(result, result, kHeapObjectTag); |
| 3248 } | 3215 } |
| 3249 } | 3216 } |
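The fast path is a classic bump allocation: top and limit are required to sit in adjacent words precisely so one Ldp can fetch both. A hedged C++ sketch of the check sequence, with a zero return standing in for the gc_required branch (names are illustrative):

    #include <cstdint>

    struct AllocationSpace {
      uintptr_t top;    // *heap_allocation_top
      uintptr_t limit;  // *heap_allocation_limit, one pointer above top
    };

    // Returns 0 if a GC is required, otherwise the untagged object address.
    uintptr_t TryAllocate(AllocationSpace* space, uintptr_t object_size) {
      uintptr_t result = space->top;
      uintptr_t new_top = result + object_size;
      if (new_top < result) return 0;        // Adds ...; B(vs, gc_required)
      if (new_top > space->limit) return 0;  // Cmp ...; B(hi, gc_required)
      space->top = new_top;                  // Str(scratch3, MemOperand(...))
      return result;
    }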
| 3250 | 3217 |
| 3251 | 3218 |
| 3252 void MacroAssembler::Allocate(Register object_size, | 3219 void MacroAssembler::Allocate(Register object_size, |
| 3253 Register result, | 3220 Register result, |
| 3254 Register scratch1, | 3221 Register scratch1, |
| 3255 Register scratch2, | 3222 Register scratch2, |
| 3256 Label* gc_required, | 3223 Label* gc_required, |
| 3257 AllocationFlags flags) { | 3224 AllocationFlags flags) { |
| 3258 if (!FLAG_inline_new) { | 3225 if (!FLAG_inline_new) { |
| 3259 if (emit_debug_code()) { | 3226 if (emit_debug_code()) { |
| 3260 // Trash the registers to simulate an allocation failure. | 3227 // Trash the registers to simulate an allocation failure. |
| 3261 // We apply salt to the original zap value to easily spot the values. | 3228 // We apply salt to the original zap value to easily spot the values. |
| 3262 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3229 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 3263 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3230 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3264 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3231 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3265 } | 3232 } |
| 3266 B(gc_required); | 3233 B(gc_required); |
| 3267 return; | 3234 return; |
| 3268 } | 3235 } |
| 3269 | 3236 |
| 3270 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1())); | 3237 UseScratchRegisterScope temps(this); |
| 3271 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && | 3238 Register scratch3 = temps.AcquireX(); |
| 3272 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3239 |
| 3240 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); |
| 3241 ASSERT(object_size.Is64Bits() && result.Is64Bits() && |
| 3242 scratch1.Is64Bits() && scratch2.Is64Bits()); |
| 3273 | 3243 |
| 3274 // Check relative positions of allocation top and limit addresses. | 3244 // Check relative positions of allocation top and limit addresses. |
| 3275 // The values must be adjacent in memory to allow the use of LDP. | 3245 // The values must be adjacent in memory to allow the use of LDP. |
| 3276 ExternalReference heap_allocation_top = | 3246 ExternalReference heap_allocation_top = |
| 3277 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3247 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 3278 ExternalReference heap_allocation_limit = | 3248 ExternalReference heap_allocation_limit = |
| 3279 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3249 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 3280 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3250 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 3281 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3251 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 3282 ASSERT((limit - top) == kPointerSize); | 3252 ASSERT((limit - top) == kPointerSize); |
| 3283 | 3253 |
| 3284 // Set up allocation top address and object size registers. | 3254 // Set up allocation top address and object size registers. |
| 3285 Register top_address = scratch1; | 3255 Register top_address = scratch1; |
| 3286 Register allocation_limit = scratch2; | 3256 Register allocation_limit = scratch2; |
| 3287 Mov(top_address, Operand(heap_allocation_top)); | 3257 Mov(top_address, Operand(heap_allocation_top)); |
| 3288 | 3258 |
| 3289 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3259 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 3290 // Load allocation top into result and the allocation limit. | 3260 // Load allocation top into result and the allocation limit. |
| 3291 Ldp(result, allocation_limit, MemOperand(top_address)); | 3261 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 3292 } else { | 3262 } else { |
| 3293 if (emit_debug_code()) { | 3263 if (emit_debug_code()) { |
| 3294 // Assert that result actually contains top on entry. | 3264 // Assert that result actually contains top on entry. |
| 3295 Ldr(Tmp0(), MemOperand(top_address)); | 3265 Ldr(scratch3, MemOperand(top_address)); |
| 3296 Cmp(result, Tmp0()); | 3266 Cmp(result, scratch3); |
| 3297 Check(eq, kUnexpectedAllocationTop); | 3267 Check(eq, kUnexpectedAllocationTop); |
| 3298 } | 3268 } |
| 3299 // Load the allocation limit. 'result' already contains the allocation top. | 3269 // Load the allocation limit. 'result' already contains the allocation top. |
| 3300 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3270 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 3301 } | 3271 } |
| 3302 | 3272 |
| 3303 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3273 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 3304 // the same alignment on A64. | 3274 // the same alignment on A64. |
| 3305 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3275 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 3306 | 3276 |
| 3307 // Calculate new top and bail out if new space is exhausted. | 3277 // Calculate new top and bail out if new space is exhausted. |
| 3308 if ((flags & SIZE_IN_WORDS) != 0) { | 3278 if ((flags & SIZE_IN_WORDS) != 0) { |
| 3309 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2)); | 3279 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); |
| 3310 } else { | 3280 } else { |
| 3311 Adds(Tmp1(), result, object_size); | 3281 Adds(scratch3, result, object_size); |
| 3312 } | 3282 } |
| 3313 | 3283 |
| 3314 if (emit_debug_code()) { | 3284 if (emit_debug_code()) { |
| 3315 Tst(Tmp1(), kObjectAlignmentMask); | 3285 Tst(scratch3, kObjectAlignmentMask); |
| 3316 Check(eq, kUnalignedAllocationInNewSpace); | 3286 Check(eq, kUnalignedAllocationInNewSpace); |
| 3317 } | 3287 } |
| 3318 | 3288 |
| 3319 B(vs, gc_required); | 3289 B(vs, gc_required); |
| 3320 Cmp(Tmp1(), allocation_limit); | 3290 Cmp(scratch3, allocation_limit); |
| 3321 B(hi, gc_required); | 3291 B(hi, gc_required); |
| 3322 Str(Tmp1(), MemOperand(top_address)); | 3292 Str(scratch3, MemOperand(top_address)); |
| 3323 | 3293 |
| 3324 // Tag the object if requested. | 3294 // Tag the object if requested. |
| 3325 if ((flags & TAG_OBJECT) != 0) { | 3295 if ((flags & TAG_OBJECT) != 0) { |
| 3326 Orr(result, result, kHeapObjectTag); | 3296 Orr(result, result, kHeapObjectTag); |
| 3327 } | 3297 } |
| 3328 } | 3298 } |
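Note: the fast path above is a classic bump-pointer allocation; the earlier ASSERT that (limit - top) == kPointerSize is what makes the single Ldp of top and limit legal. A minimal C++ sketch of the same logic, with plain pointers standing in for the external references (names and layout here are illustrative, not V8 API):

    #include <stdint.h>

    struct AllocationInfo { uintptr_t top; uintptr_t limit; };  // adjacent, Ldp-friendly

    // Returns the untagged object address, or 0 when a GC is required.
    uintptr_t BumpAllocate(AllocationInfo* info, uintptr_t size_in_bytes) {
      uintptr_t result = info->top;
      uintptr_t new_top = result + size_in_bytes;  // Adds in the generated code
      if (new_top < result) return 0;              // overflow: B(vs, gc_required)
      if (new_top > info->limit) return 0;         // B(hi, gc_required)
      info->top = new_top;                         // Str(scratch3, MemOperand(top_address))
      return result;                               // caller may Orr in kHeapObjectTag
    }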
| 3329 | 3299 |
| 3330 | 3300 |
| 3331 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 3301 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
| 3332 Register scratch) { | 3302 Register scratch) { |
| (...skipping 307 matching lines...) |
| 3640 } | 3610 } |
| 3641 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 3611 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 3642 Cmp(scratch, Operand(map)); | 3612 Cmp(scratch, Operand(map)); |
| 3643 B(ne, &fail); | 3613 B(ne, &fail); |
| 3644 Jump(success, RelocInfo::CODE_TARGET); | 3614 Jump(success, RelocInfo::CODE_TARGET); |
| 3645 Bind(&fail); | 3615 Bind(&fail); |
| 3646 } | 3616 } |
| 3647 | 3617 |
| 3648 | 3618 |
| 3649 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { | 3619 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { |
| 3650 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 3620 UseScratchRegisterScope temps(this); |
| 3651 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset)); | 3621 Register temp = temps.AcquireX(); |
| 3652 Tst(Tmp0(), mask); | 3622 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3623 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
| 3624 Tst(temp, mask); |
| 3653 } | 3625 } |
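Note: TestMapBitfield only sets the condition flags (via the final Tst); callers branch on them. A typical call site might look like the following (the mask and label are illustrative):

    __ TestMapBitfield(object, 1 << Map::kIsUndetectable);
    __ B(ne, &is_undetectable);  // the tested bit was set in the map's bit field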
| 3654 | 3626 |
| 3655 | 3627 |
| 3656 void MacroAssembler::LoadElementsKind(Register result, Register object) { | 3628 void MacroAssembler::LoadElementsKind(Register result, Register object) { |
| 3657 // Load map. | 3629 // Load map. |
| 3658 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 3630 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3659 // Load the map's "bit field 2". | 3631 // Load the map's "bit field 2". |
| 3660 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); | 3632 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); |
| 3661 // Retrieve elements_kind from bit field 2. | 3633 // Retrieve elements_kind from bit field 2. |
| 3662 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | 3634 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); |
| (...skipping 51 matching lines...) |
| 3714 Bind(&non_instance); | 3686 Bind(&non_instance); |
| 3715 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 3687 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 3716 | 3688 |
| 3717 // All done. | 3689 // All done. |
| 3718 Bind(&done); | 3690 Bind(&done); |
| 3719 } | 3691 } |
| 3720 | 3692 |
| 3721 | 3693 |
| 3722 void MacroAssembler::CompareRoot(const Register& obj, | 3694 void MacroAssembler::CompareRoot(const Register& obj, |
| 3723 Heap::RootListIndex index) { | 3695 Heap::RootListIndex index) { |
| 3724 ASSERT(!AreAliased(obj, Tmp0())); | 3696 UseScratchRegisterScope temps(this); |
| 3725 LoadRoot(Tmp0(), index); | 3697 Register temp = temps.AcquireX(); |
| 3726 Cmp(obj, Tmp0()); | 3698 ASSERT(!AreAliased(obj, temp)); |
| 3699 LoadRoot(temp, index); |
| 3700 Cmp(obj, temp); |
| 3727 } | 3701 } |
| 3728 | 3702 |
| 3729 | 3703 |
| 3730 void MacroAssembler::JumpIfRoot(const Register& obj, | 3704 void MacroAssembler::JumpIfRoot(const Register& obj, |
| 3731 Heap::RootListIndex index, | 3705 Heap::RootListIndex index, |
| 3732 Label* if_equal) { | 3706 Label* if_equal) { |
| 3733 CompareRoot(obj, index); | 3707 CompareRoot(obj, index); |
| 3734 B(eq, if_equal); | 3708 B(eq, if_equal); |
| 3735 } | 3709 } |
| 3736 | 3710 |
| (...skipping 176 matching lines...) |
| 3913 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); | 3887 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); |
| 3914 Check(lt, kIndexIsTooLarge); | 3888 Check(lt, kIndexIsTooLarge); |
| 3915 | 3889 |
| 3916 ASSERT_EQ(0, Smi::FromInt(0)); | 3890 ASSERT_EQ(0, Smi::FromInt(0)); |
| 3917 Cmp(index, 0); | 3891 Cmp(index, 0); |
| 3918 Check(ge, kIndexIsNegative); | 3892 Check(ge, kIndexIsNegative); |
| 3919 } | 3893 } |
| 3920 | 3894 |
| 3921 | 3895 |
| 3922 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 3896 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 3923 Register scratch, | 3897 Register scratch1, |
| 3898 Register scratch2, |
| 3924 Label* miss) { | 3899 Label* miss) { |
| 3925 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function. | 3900 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
| 3926 // The ARM version takes two scratch registers, and that should be enough for | |
| 3927 // all of the checks. | |
| 3928 | |
| 3929 Label same_contexts; | 3901 Label same_contexts; |
| 3930 | 3902 |
| 3931 ASSERT(!AreAliased(holder_reg, scratch)); | |
| 3932 | |
| 3933 // Load current lexical context from the stack frame. | 3903 // Load current lexical context from the stack frame. |
| 3934 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3904 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3935 // In debug mode, make sure the lexical context is set. | 3905 // In debug mode, make sure the lexical context is set. |
| 3936 #ifdef DEBUG | 3906 #ifdef DEBUG |
| 3937 Cmp(scratch, 0); | 3907 Cmp(scratch1, 0); |
| 3938 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 3908 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
| 3939 #endif | 3909 #endif |
| 3940 | 3910 |
| 3941 // Load the native context of the current context. | 3911 // Load the native context of the current context. |
| 3942 int offset = | 3912 int offset = |
| 3943 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | 3913 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
| 3944 Ldr(scratch, FieldMemOperand(scratch, offset)); | 3914 Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
| 3945 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 3915 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
| 3946 | 3916 |
| 3947 // Check the context is a native context. | 3917 // Check the context is a native context. |
| 3948 if (emit_debug_code()) { | 3918 if (emit_debug_code()) { |
| 3949 // Read the first word and compare to the global_context_map. | 3919 // Read the first word and compare to the global_context_map. |
| 3950 Register temp = Tmp1(); | 3920 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset)); |
| 3951 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 3921 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex); |
| 3952 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | |
| 3953 Check(eq, kExpectedNativeContext); | 3922 Check(eq, kExpectedNativeContext); |
| 3954 } | 3923 } |
| 3955 | 3924 |
| 3956 // Check if both contexts are the same. | 3925 // Check if both contexts are the same. |
| 3957 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 3926 Ldr(scratch2, FieldMemOperand(holder_reg, |
| 3958 cmp(scratch, Tmp0()); | 3927 JSGlobalProxy::kNativeContextOffset)); |
| 3959 b(&same_contexts, eq); | 3928 Cmp(scratch1, scratch2); |
| 3929 B(&same_contexts, eq); |
| 3960 | 3930 |
| 3961 // Check the context is a native context. | 3931 // Check the context is a native context. |
| 3962 if (emit_debug_code()) { | 3932 if (emit_debug_code()) { |
| 3963 // Move Tmp0() into a different register, as CompareRoot will use it. | 3933 // We're short on scratch registers here, so use holder_reg as a scratch. |
| 3964 Register temp = Tmp1(); | 3934 Push(holder_reg); |
| 3965 mov(temp, Tmp0()); | 3935 Register scratch3 = holder_reg; |
| 3966 CompareRoot(temp, Heap::kNullValueRootIndex); | 3936 |
| 3937 CompareRoot(scratch2, Heap::kNullValueRootIndex); |
| 3967 Check(ne, kExpectedNonNullContext); | 3938 Check(ne, kExpectedNonNullContext); |
| 3968 | 3939 |
| 3969 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset)); | 3940 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); |
| 3970 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | 3941 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex); |
| 3971 Check(eq, kExpectedNativeContext); | 3942 Check(eq, kExpectedNativeContext); |
| 3972 | 3943 Pop(holder_reg); |
| 3973 // Let's consider that Tmp0() has been cloberred by the MacroAssembler. | |
| 3974 // We reload it with its value. | |
| 3975 ldr(Tmp0(), FieldMemOperand(holder_reg, | |
| 3976 JSGlobalProxy::kNativeContextOffset)); | |
| 3977 } | 3944 } |
| 3978 | 3945 |
| 3979 // Check that the security token in the calling global object is | 3946 // Check that the security token in the calling global object is |
| 3980 // compatible with the security token in the receiving global | 3947 // compatible with the security token in the receiving global |
| 3981 // object. | 3948 // object. |
| 3982 int token_offset = Context::kHeaderSize + | 3949 int token_offset = Context::kHeaderSize + |
| 3983 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 3950 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 3984 | 3951 |
| 3985 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 3952 Ldr(scratch1, FieldMemOperand(scratch1, token_offset)); |
| 3986 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset)); | 3953 Ldr(scratch2, FieldMemOperand(scratch2, token_offset)); |
| 3987 cmp(scratch, Tmp0()); | 3954 Cmp(scratch1, scratch2); |
| 3988 b(miss, ne); | 3955 B(miss, ne); |
| 3989 | 3956 |
| 3990 bind(&same_contexts); | 3957 Bind(&same_contexts); |
| 3991 } | 3958 } |
| 3992 | 3959 |
| 3993 | 3960 |
| 3994 // Compute the hash code from the untagged key. This must be kept in sync with | 3961 // Compute the hash code from the untagged key. This must be kept in sync with |
| 3995 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | 3962 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in |
| 3996 // code-stub-hydrogen.cc | 3963 // code-stub-hydrogen.cc |
| 3997 void MacroAssembler::GetNumberHash(Register key, Register scratch) { | 3964 void MacroAssembler::GetNumberHash(Register key, Register scratch) { |
| 3998 ASSERT(!AreAliased(key, scratch)); | 3965 ASSERT(!AreAliased(key, scratch)); |
| 3999 | 3966 |
| 4000 // Xor original key with a seed. | 3967 // Xor original key with a seed. |
| (...skipping 82 matching lines...) |
| 4083 | 4050 |
| 4084 // Get the value at the masked, scaled index and return. | 4051 // Get the value at the masked, scaled index and return. |
| 4085 const int kValueOffset = | 4052 const int kValueOffset = |
| 4086 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 4053 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
| 4087 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); | 4054 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); |
| 4088 } | 4055 } |
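Note: for reference, the C++ hash that GetNumberHash above must stay in sync with is the usual Thomas Wang-style integer mix; a sketch from memory (verify against utils.h before relying on it):

    // Hedged sketch of ComputeIntegerHash(key, seed) from utils.h.
    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;   // the "xor original key with a seed" step
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // hash = (hash + (hash << 3)) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }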
| 4089 | 4056 |
| 4090 | 4057 |
| 4091 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 4058 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
| 4092 Register address, | 4059 Register address, |
| 4093 Register scratch, | 4060 Register scratch1, |
| 4094 SaveFPRegsMode fp_mode, | 4061 SaveFPRegsMode fp_mode, |
| 4095 RememberedSetFinalAction and_then) { | 4062 RememberedSetFinalAction and_then) { |
| 4096 ASSERT(!AreAliased(object, address, scratch)); | 4063 ASSERT(!AreAliased(object, address, scratch1)); |
| 4097 Label done, store_buffer_overflow; | 4064 Label done, store_buffer_overflow; |
| 4098 if (emit_debug_code()) { | 4065 if (emit_debug_code()) { |
| 4099 Label ok; | 4066 Label ok; |
| 4100 JumpIfNotInNewSpace(object, &ok); | 4067 JumpIfNotInNewSpace(object, &ok); |
| 4101 Abort(kRememberedSetPointerInNewSpace); | 4068 Abort(kRememberedSetPointerInNewSpace); |
| 4102 bind(&ok); | 4069 bind(&ok); |
| 4103 } | 4070 } |
| 4071 UseScratchRegisterScope temps(this); |
| 4072 Register scratch2 = temps.AcquireX(); |
| 4073 |
| 4104 // Load store buffer top. | 4074 // Load store buffer top. |
| 4105 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate()))); | 4075 Mov(scratch2, Operand(ExternalReference::store_buffer_top(isolate()))); |
| 4106 Ldr(scratch, MemOperand(Tmp0())); | 4076 Ldr(scratch1, MemOperand(scratch2)); |
| 4107 // Store pointer to buffer and increment buffer top. | 4077 // Store pointer to buffer and increment buffer top. |
| 4108 Str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 4078 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); |
| 4109 // Write back new top of buffer. | 4079 // Write back new top of buffer. |
| 4110 Str(scratch, MemOperand(Tmp0())); | 4080 Str(scratch1, MemOperand(scratch2)); |
| 4111 // Call stub on end of buffer. | 4081 // Call stub on end of buffer. |
| 4112 // Check for end of buffer. | 4082 // Check for end of buffer. |
| 4113 ASSERT(StoreBuffer::kStoreBufferOverflowBit == | 4083 ASSERT(StoreBuffer::kStoreBufferOverflowBit == |
| 4114 (1 << (14 + kPointerSizeLog2))); | 4084 (1 << (14 + kPointerSizeLog2))); |
| 4115 if (and_then == kFallThroughAtEnd) { | 4085 if (and_then == kFallThroughAtEnd) { |
| 4116 Tbz(scratch, (14 + kPointerSizeLog2), &done); | 4086 Tbz(scratch1, (14 + kPointerSizeLog2), &done); |
| 4117 } else { | 4087 } else { |
| 4118 ASSERT(and_then == kReturnAtEnd); | 4088 ASSERT(and_then == kReturnAtEnd); |
| 4119 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow); | 4089 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); |
| 4120 Ret(); | 4090 Ret(); |
| 4121 } | 4091 } |
| 4122 | 4092 |
| 4123 Bind(&store_buffer_overflow); | 4093 Bind(&store_buffer_overflow); |
| 4124 Push(lr); | 4094 Push(lr); |
| 4125 StoreBufferOverflowStub store_buffer_overflow_stub = | 4095 StoreBufferOverflowStub store_buffer_overflow_stub = |
| 4126 StoreBufferOverflowStub(fp_mode); | 4096 StoreBufferOverflowStub(fp_mode); |
| 4127 CallStub(&store_buffer_overflow_stub); | 4097 CallStub(&store_buffer_overflow_stub); |
| 4128 Pop(lr); | 4098 Pop(lr); |
| 4129 | 4099 |
| (...skipping 15 matching lines...) |
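Note on the store-buffer fast path in RememberedSetHelper above: it is a post-incrementing append plus a write-back of the new top, with overflow detected by a single bit of the top pointer's address. A hedged C++ sketch (plain pointers, illustrative names):

    // Append 'slot' to the store buffer, bump the top pointer, and report
    // whether the buffer overflowed.
    bool RecordInStoreBuffer(uintptr_t** store_buffer_top_addr, uintptr_t* slot) {
      uintptr_t* top = *store_buffer_top_addr;
      *top++ = reinterpret_cast<uintptr_t>(slot);  // Str ... PostIndex
      *store_buffer_top_addr = top;                // write back new top
      // Overflow is one bit in the top pointer itself, hence the Tbz/Tbnz on
      // bit (14 + kPointerSizeLog2) rather than a compare; assumes 64-bit here.
      return (reinterpret_cast<uintptr_t>(top) >> (14 + 3)) & 1;
    }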
| 4145 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so | 4115 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so |
| 4146 // adjust the stack for unsaved registers. | 4116 // adjust the stack for unsaved registers. |
| 4147 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 4117 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| 4148 ASSERT(num_unsaved >= 0); | 4118 ASSERT(num_unsaved >= 0); |
| 4149 Claim(num_unsaved); | 4119 Claim(num_unsaved); |
| 4150 PushXRegList(kSafepointSavedRegisters); | 4120 PushXRegList(kSafepointSavedRegisters); |
| 4151 } | 4121 } |
| 4152 | 4122 |
| 4153 | 4123 |
| 4154 void MacroAssembler::PushSafepointFPRegisters() { | 4124 void MacroAssembler::PushSafepointFPRegisters() { |
| 4155 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | 4125 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, |
| 4156 FPRegister::kAllocatableFPRegisters)); | 4126 FPRegister::kAllocatableFPRegisters)); |
| 4157 } | 4127 } |
| 4158 | 4128 |
| 4159 | 4129 |
| 4160 void MacroAssembler::PopSafepointFPRegisters() { | 4130 void MacroAssembler::PopSafepointFPRegisters() { |
| 4161 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | 4131 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, |
| 4162 FPRegister::kAllocatableFPRegisters)); | 4132 FPRegister::kAllocatableFPRegisters)); |
| 4163 } | 4133 } |
| 4164 | 4134 |
| 4165 | 4135 |
| 4166 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 4136 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
| 4167 // Make sure the safepoint registers list is what we expect. | 4137 // Make sure the safepoint registers list is what we expect. |
| 4168 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); | 4138 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); |
| 4169 | 4139 |
| 4170 // Safepoint registers are stored contiguously on the stack, but not all the | 4140 // Safepoint registers are stored contiguously on the stack, but not all the |
| 4171 // registers are saved. The following registers are excluded: | 4141 // registers are saved. The following registers are excluded: |
| (...skipping 85 matching lines...) |
| 4257 | 4227 |
| 4258 // Clobber clobbered input registers when running with the debug-code flag | 4228 // Clobber clobbered input registers when running with the debug-code flag |
| 4259 // turned on to provoke errors. | 4229 // turned on to provoke errors. |
| 4260 if (emit_debug_code()) { | 4230 if (emit_debug_code()) { |
| 4261 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); | 4231 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); |
| 4262 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); | 4232 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); |
| 4263 } | 4233 } |
| 4264 } | 4234 } |
| 4265 | 4235 |
| 4266 | 4236 |
| 4267 // Will clobber: object, address, value, Tmp0(), Tmp1(). | 4237 // Will clobber: object, address, value. |
| 4268 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. | 4238 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. |
| 4269 // | 4239 // |
| 4270 // The register 'object' contains a heap object pointer. The heap object tag is | 4240 // The register 'object' contains a heap object pointer. The heap object tag is |
| 4271 // shifted away. | 4241 // shifted away. |
| 4272 void MacroAssembler::RecordWrite(Register object, | 4242 void MacroAssembler::RecordWrite(Register object, |
| 4273 Register address, | 4243 Register address, |
| 4274 Register value, | 4244 Register value, |
| 4275 LinkRegisterStatus lr_status, | 4245 LinkRegisterStatus lr_status, |
| 4276 SaveFPRegsMode fp_mode, | 4246 SaveFPRegsMode fp_mode, |
| 4277 RememberedSetAction remembered_set_action, | 4247 RememberedSetAction remembered_set_action, |
| 4278 SmiCheck smi_check) { | 4248 SmiCheck smi_check) { |
| 4279 ASM_LOCATION("MacroAssembler::RecordWrite"); | 4249 ASM_LOCATION("MacroAssembler::RecordWrite"); |
| 4280 ASSERT(!AreAliased(object, value)); | 4250 ASSERT(!AreAliased(object, value)); |
| 4281 | 4251 |
| 4282 if (emit_debug_code()) { | 4252 if (emit_debug_code()) { |
| 4283 Ldr(Tmp0(), MemOperand(address)); | 4253 UseScratchRegisterScope temps(this); |
| 4284 Cmp(Tmp0(), value); | 4254 Register temp = temps.AcquireX(); |
| 4255 |
| 4256 Ldr(temp, MemOperand(address)); |
| 4257 Cmp(temp, value); |
| 4285 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 4258 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
| 4286 } | 4259 } |
| 4287 | 4260 |
| 4288 // Count number of write barriers in generated code. | 4261 // Count number of write barriers in generated code. |
| 4289 isolate()->counters()->write_barriers_static()->Increment(); | 4262 isolate()->counters()->write_barriers_static()->Increment(); |
| 4290 // TODO(mstarzinger): Dynamic counter missing. | 4263 // TODO(mstarzinger): Dynamic counter missing. |
| 4291 | 4264 |
| 4292 // First, check if a write barrier is even needed. The tests below | 4265 // First, check if a write barrier is even needed. The tests below |
| 4293 // catch stores of smis and stores into the young generation. | 4266 // catch stores of smis and stores into the young generation. |
| 4294 Label done; | 4267 Label done; |
| (...skipping 44 matching lines...) |
| 4339 Tbz(reg, 1, &color_is_valid); | 4312 Tbz(reg, 1, &color_is_valid); |
| 4340 Abort(kUnexpectedColorFound); | 4313 Abort(kUnexpectedColorFound); |
| 4341 Bind(&color_is_valid); | 4314 Bind(&color_is_valid); |
| 4342 } | 4315 } |
| 4343 } | 4316 } |
| 4344 | 4317 |
| 4345 | 4318 |
| 4346 void MacroAssembler::GetMarkBits(Register addr_reg, | 4319 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 4347 Register bitmap_reg, | 4320 Register bitmap_reg, |
| 4348 Register shift_reg) { | 4321 Register shift_reg) { |
| 4349 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg)); | 4322 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); |
| 4323 ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); |
| 4350 // addr_reg is divided into fields: | 4324 // addr_reg is divided into fields: |
| 4351 // |63 page base 20|19 high 8|7 shift 3|2 0| | 4325 // |63 page base 20|19 high 8|7 shift 3|2 0| |
| 4352 // 'high' gives the index of the cell holding color bits for the object. | 4326 // 'high' gives the index of the cell holding color bits for the object. |
| 4353 // 'shift' gives the offset in the cell for this object's color. | 4327 // 'shift' gives the offset in the cell for this object's color. |
| 4354 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 4328 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
| 4355 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | 4329 UseScratchRegisterScope temps(this); |
| 4330 Register temp = temps.AcquireX(); |
| 4331 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); |
| 4356 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); | 4332 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); |
| 4357 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2)); | 4333 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); |
| 4358 // bitmap_reg: | 4334 // bitmap_reg: |
| 4359 // |63 page base 20|19 zeros 15|14 high 3|2 0| | 4335 // |63 page base 20|19 zeros 15|14 high 3|2 0| |
| 4360 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 4336 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
| 4361 } | 4337 } |
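Note: the bit-field comments above translate to straightforward shifts and masks. A self-contained C++ rendering (assumes kPageSizeBits == 20 per the "page base 20" comment, 64-bit pointers and 32-bit bitmap cells; all constants illustrative):

    #include <stdint.h>

    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;
    const int kShiftBits = 3 + 5;  // kPointerSizeLog2 + Bitmap::kBitsPerCellLog2

    void GetMarkBits(uintptr_t addr, uintptr_t* bitmap, unsigned* shift) {
      uintptr_t cell = (addr & kPageAlignmentMask) >> kShiftBits;  // Ubfx
      *bitmap = (addr & ~kPageAlignmentMask)                       // Bic
                + (cell << 2);       // Add ... LSL Bitmap::kBytesPerCellLog2
      *shift = unsigned(addr >> 3) & 31;  // Ubfx: bits [3, 8) of the address
    }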
| 4362 | 4338 |
| 4363 | 4339 |
| 4364 void MacroAssembler::HasColor(Register object, | 4340 void MacroAssembler::HasColor(Register object, |
| 4365 Register bitmap_scratch, | 4341 Register bitmap_scratch, |
| 4366 Register shift_scratch, | 4342 Register shift_scratch, |
| 4367 Label* has_color, | 4343 Label* has_color, |
| (...skipping 203 matching lines...) |
| 4571 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { | 4547 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { |
| 4572 if (emit_debug_code()) { | 4548 if (emit_debug_code()) { |
| 4573 CheckRegisterIsClear(reg, reason); | 4549 CheckRegisterIsClear(reg, reason); |
| 4574 } | 4550 } |
| 4575 } | 4551 } |
| 4576 | 4552 |
| 4577 | 4553 |
| 4578 void MacroAssembler::AssertRegisterIsRoot(Register reg, | 4554 void MacroAssembler::AssertRegisterIsRoot(Register reg, |
| 4579 Heap::RootListIndex index, | 4555 Heap::RootListIndex index, |
| 4580 BailoutReason reason) { | 4556 BailoutReason reason) { |
| 4581 // CompareRoot uses Tmp0(). | |
| 4582 ASSERT(!reg.Is(Tmp0())); | |
| 4583 if (emit_debug_code()) { | 4557 if (emit_debug_code()) { |
| 4584 CompareRoot(reg, index); | 4558 CompareRoot(reg, index); |
| 4585 Check(eq, reason); | 4559 Check(eq, reason); |
| 4586 } | 4560 } |
| 4587 } | 4561 } |
| 4588 | 4562 |
| 4589 | 4563 |
| 4590 void MacroAssembler::AssertFastElements(Register elements) { | 4564 void MacroAssembler::AssertFastElements(Register elements) { |
| 4591 if (emit_debug_code()) { | 4565 if (emit_debug_code()) { |
| 4592 Register temp = Tmp1(); | 4566 UseScratchRegisterScope temps(this); |
| 4567 Register temp = temps.AcquireX(); |
| 4593 Label ok; | 4568 Label ok; |
| 4594 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); | 4569 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 4595 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); | 4570 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); |
| 4596 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); | 4571 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); |
| 4597 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); | 4572 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); |
| 4598 Abort(kJSObjectWithFastElementsMapHasSlowElements); | 4573 Abort(kJSObjectWithFastElementsMapHasSlowElements); |
| 4599 Bind(&ok); | 4574 Bind(&ok); |
| 4600 } | 4575 } |
| 4601 } | 4576 } |
| 4602 | 4577 |
| 4603 | 4578 |
| 4604 void MacroAssembler::AssertIsString(const Register& object) { | 4579 void MacroAssembler::AssertIsString(const Register& object) { |
| 4605 if (emit_debug_code()) { | 4580 if (emit_debug_code()) { |
| 4606 Register temp = Tmp1(); | 4581 UseScratchRegisterScope temps(this); |
| 4582 Register temp = temps.AcquireX(); |
| 4607 STATIC_ASSERT(kSmiTag == 0); | 4583 STATIC_ASSERT(kSmiTag == 0); |
| 4608 Tst(object, Operand(kSmiTagMask)); | 4584 Tst(object, Operand(kSmiTagMask)); |
| 4609 Check(ne, kOperandIsNotAString); | 4585 Check(ne, kOperandIsNotAString); |
| 4610 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 4586 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 4611 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 4587 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 4612 Check(lo, kOperandIsNotAString); | 4588 Check(lo, kOperandIsNotAString); |
| 4613 } | 4589 } |
| 4614 } | 4590 } |
| 4615 | 4591 |
| 4616 | 4592 |
| (...skipping 26 matching lines...) |
| 4643 } | 4619 } |
| 4644 #endif | 4620 #endif |
| 4645 | 4621 |
| 4646 // Abort is used in some contexts where csp is the stack pointer. In order to | 4622 // Abort is used in some contexts where csp is the stack pointer. In order to |
| 4647 // simplify the CallRuntime code, make sure that jssp is the stack pointer. | 4623 // simplify the CallRuntime code, make sure that jssp is the stack pointer. |
| 4648 // There is no risk of register corruption here because Abort doesn't return. | 4624 // There is no risk of register corruption here because Abort doesn't return. |
| 4649 Register old_stack_pointer = StackPointer(); | 4625 Register old_stack_pointer = StackPointer(); |
| 4650 SetStackPointer(jssp); | 4626 SetStackPointer(jssp); |
| 4651 Mov(jssp, old_stack_pointer); | 4627 Mov(jssp, old_stack_pointer); |
| 4652 | 4628 |
| 4629 // We need some scratch registers for the MacroAssembler, so make sure we have |
| 4630 // some. This is safe here because Abort never returns. |
| 4631 RegList old_tmp_list = TmpList()->list(); |
| 4632 TmpList()->Combine(ip0); |
| 4633 TmpList()->Combine(ip1); |
| 4634 |
| 4653 if (use_real_aborts()) { | 4635 if (use_real_aborts()) { |
| 4654 // Avoid infinite recursion; Push contains some assertions that use Abort. | 4636 // Avoid infinite recursion; Push contains some assertions that use Abort. |
| 4655 NoUseRealAbortsScope no_real_aborts(this); | 4637 NoUseRealAbortsScope no_real_aborts(this); |
| 4656 | 4638 |
| 4657 Mov(x0, Operand(Smi::FromInt(reason))); | 4639 Mov(x0, Operand(Smi::FromInt(reason))); |
| 4658 Push(x0); | 4640 Push(x0); |
| 4659 | 4641 |
| 4660 if (!has_frame_) { | 4642 if (!has_frame_) { |
| 4661 // We don't actually want to generate a pile of code for this, so just | 4643 // We don't actually want to generate a pile of code for this, so just |
| 4662 // claim there is a stack frame, without generating one. | 4644 // claim there is a stack frame, without generating one. |
| 4663 FrameScope scope(this, StackFrame::NONE); | 4645 FrameScope scope(this, StackFrame::NONE); |
| 4664 CallRuntime(Runtime::kAbort, 1); | 4646 CallRuntime(Runtime::kAbort, 1); |
| 4665 } else { | 4647 } else { |
| 4666 CallRuntime(Runtime::kAbort, 1); | 4648 CallRuntime(Runtime::kAbort, 1); |
| 4667 } | 4649 } |
| 4668 } else { | 4650 } else { |
| 4669 // Load the string to pass to Printf. | 4651 // Load the string to pass to Printf. |
| 4670 Label msg_address; | 4652 Label msg_address; |
| 4671 Adr(x0, &msg_address); | 4653 Adr(x0, &msg_address); |
| 4672 | 4654 |
| 4673 // Call Printf directly to report the error. | 4655 // Call Printf directly to report the error. |
| 4674 CallPrintf(); | 4656 CallPrintf(); |
| 4675 | 4657 |
| 4676 // We need a way to stop execution on both the simulator and real hardware, | 4658 // We need a way to stop execution on both the simulator and real hardware, |
| 4677 // and Unreachable() is the best option. | 4659 // and Unreachable() is the best option. |
| 4678 Unreachable(); | 4660 Unreachable(); |
| 4679 | 4661 |
| 4680 // Emit the message string directly in the instruction stream. | 4662 // Emit the message string directly in the instruction stream. |
| 4681 { | 4663 { |
| 4682 BlockConstPoolScope scope(this); | 4664 BlockPoolsScope scope(this); |
| 4683 Bind(&msg_address); | 4665 Bind(&msg_address); |
| 4684 EmitStringData(GetBailoutReason(reason)); | 4666 EmitStringData(GetBailoutReason(reason)); |
| 4685 } | 4667 } |
| 4686 } | 4668 } |
| 4687 | 4669 |
| 4688 SetStackPointer(old_stack_pointer); | 4670 SetStackPointer(old_stack_pointer); |
| 4671 TmpList()->set_list(old_tmp_list); |
| 4689 } | 4672 } |
| 4690 | 4673 |
| 4691 | 4674 |
| 4692 void MacroAssembler::LoadTransitionedArrayMapConditional( | 4675 void MacroAssembler::LoadTransitionedArrayMapConditional( |
| 4693 ElementsKind expected_kind, | 4676 ElementsKind expected_kind, |
| 4694 ElementsKind transitioned_kind, | 4677 ElementsKind transitioned_kind, |
| 4695 Register map_in_out, | 4678 Register map_in_out, |
| 4696 Register scratch, | 4679 Register scratch1, |
| 4680 Register scratch2, |
| 4697 Label* no_map_match) { | 4681 Label* no_map_match) { |
| 4698 // Load the global or builtins object from the current context. | 4682 // Load the global or builtins object from the current context. |
| 4699 Ldr(scratch, GlobalObjectMemOperand()); | 4683 Ldr(scratch1, GlobalObjectMemOperand()); |
| 4700 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 4684 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
| 4701 | 4685 |
| 4702 // Check that the function's map is the same as the expected cached map. | 4686 // Check that the function's map is the same as the expected cached map. |
| 4703 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX)); | 4687 Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX)); |
| 4704 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4688 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4705 Ldr(Tmp0(), FieldMemOperand(scratch, offset)); | 4689 Ldr(scratch2, FieldMemOperand(scratch1, offset)); |
| 4706 Cmp(map_in_out, Tmp0()); | 4690 Cmp(map_in_out, scratch2); |
| 4707 B(ne, no_map_match); | 4691 B(ne, no_map_match); |
| 4708 | 4692 |
| 4709 // Use the transitioned cached map. | 4693 // Use the transitioned cached map. |
| 4710 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4694 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4711 Ldr(map_in_out, FieldMemOperand(scratch, offset)); | 4695 Ldr(map_in_out, FieldMemOperand(scratch1, offset)); |
| 4712 } | 4696 } |
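Note: in C++ terms the sequence above is a lookup in the native context's cached JSArray maps, keyed by ElementsKind. A hedged sketch with approximated V8 accessors (not the real API):

    // Returns false on the no_map_match path.
    bool TransitionArrayMap(Context* native_context, ElementsKind expected_kind,
                            ElementsKind transitioned_kind, Object** map_in_out) {
      FixedArray* maps =
          FixedArray::cast(native_context->get(Context::JS_ARRAY_MAPS_INDEX));
      if (*map_in_out != maps->get(expected_kind)) return false;  // B(ne, ...)
      *map_in_out = maps->get(transitioned_kind);
      return true;
    }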
| 4713 | 4697 |
| 4714 | 4698 |
| 4715 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 4699 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
| 4716 // Load the global or builtins object from the current context. | 4700 // Load the global or builtins object from the current context. |
| 4717 Ldr(function, GlobalObjectMemOperand()); | 4701 Ldr(function, GlobalObjectMemOperand()); |
| 4718 // Load the native context from the global or builtins object. | 4702 // Load the native context from the global or builtins object. |
| 4719 Ldr(function, FieldMemOperand(function, | 4703 Ldr(function, FieldMemOperand(function, |
| 4720 GlobalObject::kNativeContextOffset)); | 4704 GlobalObject::kNativeContextOffset)); |
| 4721 // Load the function from the native context. | 4705 // Load the function from the native context. |
| (...skipping 21 matching lines...) |
| 4743 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. | 4727 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. |
| 4744 void MacroAssembler::PrintfNoPreserve(const char * format, | 4728 void MacroAssembler::PrintfNoPreserve(const char * format, |
| 4745 const CPURegister& arg0, | 4729 const CPURegister& arg0, |
| 4746 const CPURegister& arg1, | 4730 const CPURegister& arg1, |
| 4747 const CPURegister& arg2, | 4731 const CPURegister& arg2, |
| 4748 const CPURegister& arg3) { | 4732 const CPURegister& arg3) { |
| 4749 // We cannot handle a caller-saved stack pointer. It doesn't make much sense | 4733 // We cannot handle a caller-saved stack pointer. It doesn't make much sense |
| 4750 // in most cases anyway, so this restriction shouldn't be too serious. | 4734 // in most cases anyway, so this restriction shouldn't be too serious. |
| 4751 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); | 4735 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); |
| 4752 | 4736 |
| 4753 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro | 4737 // Make sure that the macro assembler doesn't try to use any of our arguments |
| 4754 // assembler. We cannot print the stack pointer because it is typically used | 4738 // as scratch registers. |
| 4755 // to preserve caller-saved registers (using other Printf variants which | 4739 ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); |
| 4756 // depend on this helper). | 4740 ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); |
| 4757 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0)); | 4741 |
| 4758 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1)); | 4742 // We cannot print the stack pointer because it is typically used to preserve |
| 4759 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2)); | 4743 // caller-saved registers (using other Printf variants which depend on this |
| 4760 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3)); | 4744 // helper). |
| 4745 ASSERT(!AreAliased(arg0, StackPointer())); |
| 4746 ASSERT(!AreAliased(arg1, StackPointer())); |
| 4747 ASSERT(!AreAliased(arg2, StackPointer())); |
| 4748 ASSERT(!AreAliased(arg3, StackPointer())); |
| 4761 | 4749 |
| 4762 static const int kMaxArgCount = 4; | 4750 static const int kMaxArgCount = 4; |
| 4763 // Assume that we have the maximum number of arguments until we know | 4751 // Assume that we have the maximum number of arguments until we know |
| 4764 // otherwise. | 4752 // otherwise. |
| 4765 int arg_count = kMaxArgCount; | 4753 int arg_count = kMaxArgCount; |
| 4766 | 4754 |
| 4767 // The provided arguments. | 4755 // The provided arguments. |
| 4768 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; | 4756 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; |
| 4769 | 4757 |
| 4770 // The PCS registers where the arguments need to end up. | 4758 // The PCS registers where the arguments need to end up. |
| (...skipping 82 matching lines...) |
| 4853 // Load the format string into x0, as per the procedure-call standard. | 4841 // Load the format string into x0, as per the procedure-call standard. |
| 4854 // | 4842 // |
| 4855 // To make the code as portable as possible, the format string is encoded | 4843 // To make the code as portable as possible, the format string is encoded |
| 4856 // directly in the instruction stream. It might be cleaner to encode it in a | 4844 // directly in the instruction stream. It might be cleaner to encode it in a |
| 4857 // literal pool, but since Printf is usually used for debugging, it is | 4845 // literal pool, but since Printf is usually used for debugging, it is |
| 4858 // beneficial for it to be minimally dependent on other features. | 4846 // beneficial for it to be minimally dependent on other features. |
| 4859 Label format_address; | 4847 Label format_address; |
| 4860 Adr(x0, &format_address); | 4848 Adr(x0, &format_address); |
| 4861 | 4849 |
| 4862 // Emit the format string directly in the instruction stream. | 4850 // Emit the format string directly in the instruction stream. |
| 4863 { BlockConstPoolScope scope(this); | 4851 { BlockPoolsScope scope(this); |
| 4864 Label after_data; | 4852 Label after_data; |
| 4865 B(&after_data); | 4853 B(&after_data); |
| 4866 Bind(&format_address); | 4854 Bind(&format_address); |
| 4867 EmitStringData(format); | 4855 EmitStringData(format); |
| 4868 Unreachable(); | 4856 Unreachable(); |
| 4869 Bind(&after_data); | 4857 Bind(&after_data); |
| 4870 } | 4858 } |
| 4871 | 4859 |
| 4872 // We don't pass any arguments on the stack, but we still need to align the C | 4860 // We don't pass any arguments on the stack, but we still need to align the C |
| 4873 // stack pointer to a 16-byte boundary for PCS compliance. | 4861 // stack pointer to a 16-byte boundary for PCS compliance. |
| (...skipping 18 matching lines...) |
| 4892 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); | 4880 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); |
| 4893 #endif | 4881 #endif |
| 4894 } | 4882 } |
| 4895 | 4883 |
| 4896 | 4884 |
| 4897 void MacroAssembler::Printf(const char * format, | 4885 void MacroAssembler::Printf(const char * format, |
| 4898 const CPURegister& arg0, | 4886 const CPURegister& arg0, |
| 4899 const CPURegister& arg1, | 4887 const CPURegister& arg1, |
| 4900 const CPURegister& arg2, | 4888 const CPURegister& arg2, |
| 4901 const CPURegister& arg3) { | 4889 const CPURegister& arg3) { |
| 4890 // Printf is expected to preserve all registers, so make sure that none are |
| 4891 // available as scratch registers until we've preserved them. |
| 4892 RegList old_tmp_list = TmpList()->list(); |
| 4893 RegList old_fp_tmp_list = FPTmpList()->list(); |
| 4894 TmpList()->set_list(0); |
| 4895 FPTmpList()->set_list(0); |
| 4896 |
| 4902 // Preserve all caller-saved registers as well as NZCV. | 4897 // Preserve all caller-saved registers as well as NZCV. |
| 4903 // If csp is the stack pointer, PushCPURegList asserts that the size of each | 4898 // If csp is the stack pointer, PushCPURegList asserts that the size of each |
| 4904 // list is a multiple of 16 bytes. | 4899 // list is a multiple of 16 bytes. |
| 4905 PushCPURegList(kCallerSaved); | 4900 PushCPURegList(kCallerSaved); |
| 4906 PushCPURegList(kCallerSavedFP); | 4901 PushCPURegList(kCallerSavedFP); |
| 4907 // Use Tmp0() as a scratch register. It is not accepted by Printf so it will | 4902 |
| 4908 // never overlap an argument register. | 4903 // We can use caller-saved registers as scratch values (except for argN). |
| 4909 Mrs(Tmp0(), NZCV); | 4904 CPURegList tmp_list = kCallerSaved; |
| 4910 Push(Tmp0(), xzr); | 4905 CPURegList fp_tmp_list = kCallerSavedFP; |
| 4906 tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4907 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4908 TmpList()->set_list(tmp_list.list()); |
| 4909 FPTmpList()->set_list(fp_tmp_list.list()); |
| 4910 |
| 4911 // Preserve NZCV. |
| 4912 { UseScratchRegisterScope temps(this); |
| 4913 Register tmp = temps.AcquireX(); |
| 4914 Mrs(tmp, NZCV); |
| 4915 Push(tmp, xzr); |
| 4916 } |
| 4911 | 4917 |
| 4912 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | 4918 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); |
| 4913 | 4919 |
| 4914 Pop(xzr, Tmp0()); | 4920 { UseScratchRegisterScope temps(this); |
| 4915 Msr(NZCV, Tmp0()); | 4921 Register tmp = temps.AcquireX(); |
| 4922 Pop(xzr, tmp); |
| 4923 Msr(NZCV, tmp); |
| 4924 } |
| 4925 |
| 4916 PopCPURegList(kCallerSavedFP); | 4926 PopCPURegList(kCallerSavedFP); |
| 4917 PopCPURegList(kCallerSaved); | 4927 PopCPURegList(kCallerSaved); |
| 4928 |
| 4929 TmpList()->set_list(old_tmp_list); |
| 4930 FPTmpList()->set_list(old_fp_tmp_list); |
| 4918 } | 4931 } |
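Note: since Printf now preserves every caller-saved register (including NZCV) and builds its own scratch lists, it can be sprinkled into generated code while debugging. An illustrative call site:

    // Illustrative only; CPURegister arguments must not be in TmpList().
    __ Printf("x0 = 0x%" PRIx64 ", d0 = %g\n", x0, d0);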
| 4919 | 4932 |
| 4920 | 4933 |
| 4921 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { | 4934 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { |
| 4922 // TODO(jbramley): Other architectures use the internal memcpy to copy the | 4935 // TODO(jbramley): Other architectures use the internal memcpy to copy the |
| 4923 // sequence. If this is a performance bottleneck, we should consider caching | 4936 // sequence. If this is a performance bottleneck, we should consider caching |
| 4924 // the sequence and copying it in the same way. | 4937 // the sequence and copying it in the same way. |
| 4925 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); | 4938 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); |
| 4926 ASSERT(jssp.Is(StackPointer())); | 4939 ASSERT(jssp.Is(StackPointer())); |
| 4927 EmitFrameSetupForCodeAgePatching(this); | 4940 EmitFrameSetupForCodeAgePatching(this); |
| (...skipping 13 matching lines...) |
| 4941 | 4954 |
| 4942 | 4955 |
| 4943 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { | 4956 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { |
| 4944 Label start; | 4957 Label start; |
| 4945 __ bind(&start); | 4958 __ bind(&start); |
| 4946 | 4959 |
| 4947 // We can do this sequence using four instructions, but the code ageing | 4960 // We can do this sequence using four instructions, but the code ageing |
| 4948 // sequence that patches it needs five, so we use the extra space to try to | 4961 // sequence that patches it needs five, so we use the extra space to try to |
| 4949 // simplify some addressing modes and remove some dependencies (compared to | 4962 // simplify some addressing modes and remove some dependencies (compared to |
| 4950 // using two stp instructions with write-back). | 4963 // using two stp instructions with write-back). |
| 4951 __ sub(jssp, jssp, 4 * kXRegSizeInBytes); | 4964 __ sub(jssp, jssp, 4 * kXRegSize); |
| 4952 __ sub(csp, csp, 4 * kXRegSizeInBytes); | 4965 __ sub(csp, csp, 4 * kXRegSize); |
| 4953 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes)); | 4966 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize)); |
| 4954 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes)); | 4967 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize)); |
| 4955 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 4968 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
| 4956 | 4969 |
| 4957 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); | 4970 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); |
| 4958 } | 4971 } |
| 4959 | 4972 |
| 4960 | 4973 |
| 4961 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, | 4974 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, |
| 4962 Code * stub) { | 4975 Code * stub) { |
| 4963 Label start; | 4976 Label start; |
| 4964 __ bind(&start); | 4977 __ bind(&start); |
| (...skipping 47 matching lines...) |
| 5012 if (!initialized) { | 5025 if (!initialized) { |
| 5013 PatchingAssembler patcher(old, length); | 5026 PatchingAssembler patcher(old, length); |
| 5014 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); | 5027 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); |
| 5015 initialized = true; | 5028 initialized = true; |
| 5016 } | 5029 } |
| 5017 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; | 5030 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; |
| 5018 } | 5031 } |
| 5019 #endif | 5032 #endif |
| 5020 | 5033 |
| 5021 | 5034 |
| 5035 void MacroAssembler::FlooringDiv(Register result, |
| 5036 Register dividend, |
| 5037 int32_t divisor) { |
| 5038 ASSERT(!AreAliased(result, dividend)); |
| 5039 ASSERT(result.Is32Bits() && dividend.Is32Bits()); |
| 5040 MultiplierAndShift ms(divisor); |
| 5041 Mov(result, Operand(ms.multiplier())); |
| 5042 Smull(result.X(), dividend, result); |
| 5043 Asr(result.X(), result.X(), 32); |
| 5044 if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend); |
| 5045 if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend); |
| 5046 if (ms.shift() > 0) Asr(result, result, ms.shift()); |
| 5047 } |
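Note: a quick sanity check of the FlooringDiv sequence, assuming MultiplierAndShift(3) produces multiplier 0x55555556 with shift 0 (the usual magic numbers; treat as illustrative):

    int32_t dividend = -7;
    int64_t product = int64_t(dividend) * 0x55555556;  // Smull
    int32_t result = int32_t(product >> 32);           // Asr(result.X(), ..., 32)
    // multiplier > 0, divisor > 0: no Add/Sub correction; shift == 0: no Asr.
    // result == -3 == floor(-7 / 3), where truncating division would give -2.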
| 5048 |
| 5049 |
| 5022 #undef __ | 5050 #undef __ |
| 5051 |
| 5052 |
| 5053 UseScratchRegisterScope::~UseScratchRegisterScope() { |
| 5054 available_->set_list(old_available_); |
| 5055 availablefp_->set_list(old_availablefp_); |
| 5056 } |
| 5057 |
| 5058 |
| 5059 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { |
| 5060 int code = AcquireNextAvailable(available_).code(); |
| 5061 return Register::Create(code, reg.SizeInBits()); |
| 5062 } |
| 5063 |
| 5064 |
| 5065 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { |
| 5066 int code = AcquireNextAvailable(availablefp_).code(); |
| 5067 return FPRegister::Create(code, reg.SizeInBits()); |
| 5068 } |
| 5069 |
| 5070 |
| 5071 CPURegister UseScratchRegisterScope::AcquireNextAvailable( |
| 5072 CPURegList* available) { |
| 5073 CHECK(!available->IsEmpty()); |
| 5074 CPURegister result = available->PopLowestIndex(); |
| 5075 ASSERT(!AreAliased(result, xzr, csp)); |
| 5076 return result; |
| 5077 } |
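Note: the destructor restores both pools, so scopes nest safely. Typical (illustrative) use inside a macro-assembler method; src and dst are assumed registers:

    {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();  // pops the lowest-indexed available register
      Ldr(temp, MemOperand(src));
      Str(temp, MemOperand(dst));
    }  // ~UseScratchRegisterScope returns temp to TmpList()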
| 5078 |
| 5079 |
| 5023 #define __ masm-> | 5080 #define __ masm-> |
| 5024 | 5081 |
| 5025 | 5082 |
| 5026 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, | 5083 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, |
| 5027 const Label* smi_check) { | 5084 const Label* smi_check) { |
| 5028 Assembler::BlockConstPoolScope scope(masm); | 5085 Assembler::BlockPoolsScope scope(masm); |
| 5029 if (reg.IsValid()) { | 5086 if (reg.IsValid()) { |
| 5030 ASSERT(smi_check->is_bound()); | 5087 ASSERT(smi_check->is_bound()); |
| 5031 ASSERT(reg.Is64Bits()); | 5088 ASSERT(reg.Is64Bits()); |
| 5032 | 5089 |
| 5033 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to | 5090 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to |
| 5034 // 'check' in the other bits. The possible offset is limited in that we | 5091 // 'check' in the other bits. The possible offset is limited in that we |
| 5035 // use BitField to pack the data, and the underlying data type is a | 5092 // use BitField to pack the data, and the underlying data type is a |
| 5036 // uint32_t. | 5093 // uint32_t. |
| 5037 uint32_t delta = __ InstructionsGeneratedSince(smi_check); | 5094 uint32_t delta = __ InstructionsGeneratedSince(smi_check); |
| 5038 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); | 5095 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); |
| (...skipping 25 matching lines...) |
| 5064 } | 5121 } |
| 5065 } | 5122 } |
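Note: the same BitField classes can decode the word again when the smi check is patched; a hedged sketch (field widths per the comment above, exact helpers assumed):

    // Recover the register code and instruction delta from InlineData's payload.
    void DecodeSmiCheckInfo(uint32_t payload, unsigned* reg_code, uint32_t* delta) {
      *reg_code = RegisterBits::decode(payload);  // lowest 5 bits: x0-x30
      *delta = DeltaBits::decode(payload);        // remaining bits: offset to the check
    }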
| 5066 | 5123 |
| 5067 | 5124 |
| 5068 #undef __ | 5125 #undef __ |
| 5069 | 5126 |
| 5070 | 5127 |
| 5071 } } // namespace v8::internal | 5128 } } // namespace v8::internal |
| 5072 | 5129 |
| 5073 #endif // V8_TARGET_ARCH_A64 | 5130 #endif // V8_TARGET_ARCH_A64 |