Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 35 matching lines...) | |
| 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, | 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, |
| 47 byte * buffer, | 47 byte * buffer, |
| 48 unsigned buffer_size) | 48 unsigned buffer_size) |
| 49 : Assembler(arg_isolate, buffer, buffer_size), | 49 : Assembler(arg_isolate, buffer, buffer_size), |
| 50 generating_stub_(false), | 50 generating_stub_(false), |
| 51 #if DEBUG | 51 #if DEBUG |
| 52 allow_macro_instructions_(true), | 52 allow_macro_instructions_(true), |
| 53 #endif | 53 #endif |
| 54 has_frame_(false), | 54 has_frame_(false), |
| 55 use_real_aborts_(true), | 55 use_real_aborts_(true), |
| 56 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) { | 56 sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) { |
| 57 if (isolate() != NULL) { | 57 if (isolate() != NULL) { |
| 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
| 59 isolate()); | 59 isolate()); |
| 60 } | 60 } |
| 61 } | 61 } |
| 62 | 62 |
| 63 | 63 |
| 64 void MacroAssembler::LogicalMacro(const Register& rd, | 64 void MacroAssembler::LogicalMacro(const Register& rd, |
| 65 const Register& rn, | 65 const Register& rn, |
| 66 const Operand& operand, | 66 const Operand& operand, |
| 67 LogicalOp op) { | 67 LogicalOp op) { |
| 68 UseScratchRegisterScope temps(this); | |
| 69 | |
| 68 if (operand.NeedsRelocation()) { | 70 if (operand.NeedsRelocation()) { |
| 69 LoadRelocated(Tmp0(), operand); | 71 Register temp = temps.AcquireX(); |
| 70 Logical(rd, rn, Tmp0(), op); | 72 LoadRelocated(temp, operand); |
| 73 Logical(rd, rn, temp, op); | |
| 71 | 74 |
| 72 } else if (operand.IsImmediate()) { | 75 } else if (operand.IsImmediate()) { |
| 73 int64_t immediate = operand.immediate(); | 76 int64_t immediate = operand.immediate(); |
| 74 unsigned reg_size = rd.SizeInBits(); | 77 unsigned reg_size = rd.SizeInBits(); |
| 75 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 78 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
| 76 | 79 |
| 77 // If the operation is NOT, invert the operation and immediate. | 80 // If the operation is NOT, invert the operation and immediate. |
| 78 if ((op & NOT) == NOT) { | 81 if ((op & NOT) == NOT) { |
| 79 op = static_cast<LogicalOp>(op & ~NOT); | 82 op = static_cast<LogicalOp>(op & ~NOT); |
| 80 immediate = ~immediate; | 83 immediate = ~immediate; |
| (...skipping 37 matching lines...) | |
| 118 UNREACHABLE(); | 121 UNREACHABLE(); |
| 119 } | 122 } |
| 120 } | 123 } |
| 121 | 124 |
| 122 unsigned n, imm_s, imm_r; | 125 unsigned n, imm_s, imm_r; |
| 123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | 126 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
| 124 // Immediate can be encoded in the instruction. | 127 // Immediate can be encoded in the instruction. |
| 125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 128 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
| 126 } else { | 129 } else { |
| 127 // Immediate can't be encoded: synthesize using move immediate. | 130 // Immediate can't be encoded: synthesize using move immediate. |
| 128 Register temp = AppropriateTempFor(rn); | 131 Register temp = temps.AcquireSameSizeAs(rn); |
| 129 Mov(temp, immediate); | 132 Mov(temp, immediate); |
| 130 if (rd.Is(csp)) { | 133 if (rd.Is(csp)) { |
| 131 // If rd is the stack pointer we cannot use it as the destination | 134 // If rd is the stack pointer we cannot use it as the destination |
| 132 // register so we use the temp register as an intermediate again. | 135 // register so we use the temp register as an intermediate again. |
| 133 Logical(temp, rn, temp, op); | 136 Logical(temp, rn, temp, op); |
| 134 Mov(csp, temp); | 137 Mov(csp, temp); |
| 135 } else { | 138 } else { |
| 136 Logical(rd, rn, temp, op); | 139 Logical(rd, rn, temp, op); |
| 137 } | 140 } |
| 138 } | 141 } |
| 139 | 142 |
| 140 } else if (operand.IsExtendedRegister()) { | 143 } else if (operand.IsExtendedRegister()) { |
| 141 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 144 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 142 // Add/sub extended supports shift <= 4. We want to support exactly the | 145 // Add/sub extended supports shift <= 4. We want to support exactly the |
| 143 // same modes here. | 146 // same modes here. |
| 144 ASSERT(operand.shift_amount() <= 4); | 147 ASSERT(operand.shift_amount() <= 4); |
| 145 ASSERT(operand.reg().Is64Bits() || | 148 ASSERT(operand.reg().Is64Bits() || |
| 146 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 149 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 147 Register temp = AppropriateTempFor(rn, operand.reg()); | 150 Register temp = temps.AcquireSameSizeAs(rn); |
| 148 EmitExtendShift(temp, operand.reg(), operand.extend(), | 151 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 149 operand.shift_amount()); | 152 operand.shift_amount()); |
| 150 Logical(rd, rn, temp, op); | 153 Logical(rd, rn, temp, op); |
| 151 | 154 |
| 152 } else { | 155 } else { |
| 153 // The operand can be encoded in the instruction. | 156 // The operand can be encoded in the instruction. |
| 154 ASSERT(operand.IsShiftedRegister()); | 157 ASSERT(operand.IsShiftedRegister()); |
| 155 Logical(rd, rn, operand, op); | 158 Logical(rd, rn, operand, op); |
| 156 } | 159 } |
| 157 } | 160 } |
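
The new pattern throughout this change replaces the fixed Tmp0()/Tmp1() scratch registers with a UseScratchRegisterScope that borrows registers from the assembler's temp list and hands them back when the scope ends. As a reading aid only, here is a minimal stand-alone C++ model of that acquire/release discipline; ScratchPool, ScratchScope and the register codes are invented for this sketch and are not V8's actual classes:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// The pool of free scratch registers, playing the role of the masm's TmpList().
struct ScratchPool {
  uint32_t free_mask;  // bit i set => register xi is available
};

// RAII scope: every register acquired through it is returned to the pool when
// the scope is destroyed, so nested code can reuse the same scratch registers.
class ScratchScope {
 public:
  explicit ScratchScope(ScratchPool* pool) : pool_(pool), acquired_(0) {}
  ~ScratchScope() { pool_->free_mask |= acquired_; }

  int AcquireX() {
    assert(pool_->free_mask != 0 && "no scratch registers left");
    int code = 0;
    while (((pool_->free_mask >> code) & 1) == 0) code++;  // lowest free register
    pool_->free_mask &= ~(1u << code);
    acquired_ |= (1u << code);
    return code;
  }

 private:
  ScratchPool* pool_;
  uint32_t acquired_;
};

int main() {
  ScratchPool pool = {(1u << 16) | (1u << 17)};  // ip0 (x16) and ip1 (x17)
  {
    ScratchScope temps(&pool);
    printf("acquired x%d\n", temps.AcquireX());        // x16
  }                                                    // released here
  ScratchScope temps(&pool);
  printf("acquired x%d again\n", temps.AcquireX());    // x16 is free again
  return 0;
}
```
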
| (...skipping 43 matching lines...) | |
| 201 uint64_t ignored_halfword = 0; | 204 uint64_t ignored_halfword = 0; |
| 202 bool invert_move = false; | 205 bool invert_move = false; |
| 203 // If the number of 0xffff halfwords is greater than the number of 0x0000 | 206 // If the number of 0xffff halfwords is greater than the number of 0x0000 |
| 204 // halfwords, it's more efficient to use move-inverted. | 207 // halfwords, it's more efficient to use move-inverted. |
| 205 if (CountClearHalfWords(~imm, reg_size) > | 208 if (CountClearHalfWords(~imm, reg_size) > |
| 206 CountClearHalfWords(imm, reg_size)) { | 209 CountClearHalfWords(imm, reg_size)) { |
| 207 ignored_halfword = 0xffffL; | 210 ignored_halfword = 0xffffL; |
| 208 invert_move = true; | 211 invert_move = true; |
| 209 } | 212 } |
| 210 | 213 |
| 211 // Mov instructions can't move value into the stack pointer, so set up a | 214 // Mov instructions can't move immediate values into the stack pointer, so |
| 212 // temporary register, if needed. | 215 // set up a temporary register, if needed. |
| 213 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd; | 216 UseScratchRegisterScope temps(this); |
| 217 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; | |
| 214 | 218 |
| 215 // Iterate through the halfwords. Use movn/movz for the first non-ignored | 219 // Iterate through the halfwords. Use movn/movz for the first non-ignored |
| 216 // halfword, and movk for subsequent halfwords. | 220 // halfword, and movk for subsequent halfwords. |
| 217 ASSERT((reg_size % 16) == 0); | 221 ASSERT((reg_size % 16) == 0); |
| 218 bool first_mov_done = false; | 222 bool first_mov_done = false; |
| 219 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { | 223 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { |
| 220 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; | 224 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; |
| 221 if (imm16 != ignored_halfword) { | 225 if (imm16 != ignored_halfword) { |
| 222 if (!first_mov_done) { | 226 if (!first_mov_done) { |
| 223 if (invert_move) { | 227 if (invert_move) { |
| (...skipping 17 matching lines...) | |
| 241 } | 245 } |
| 242 } | 246 } |
| 243 } | 247 } |
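
The halfword loop above (movn/movz for the first non-ignored halfword, movk for the rest) is easier to follow outside the diff. A small stand-alone model of the same selection logic, with a made-up immediate and printf output standing in for the emitted instructions:

```cpp
#include <cstdint>
#include <cstdio>

// Counts the 16-bit halfwords of imm that equal `value` (0x0000 or 0xffff).
static unsigned CountHalfWords(uint64_t imm, unsigned value) {
  unsigned count = 0;
  for (unsigned i = 0; i < 4; i++) {
    if (((imm >> (16 * i)) & 0xffff) == value) count++;
  }
  return count;
}

int main() {
  uint64_t imm = 0xffffffffffff1234ULL;  // made-up, mostly-ones immediate

  // Mirrors "CountClearHalfWords(~imm) > CountClearHalfWords(imm)": when most
  // halfwords are 0xffff, start from movn (move-inverted) and fix up the rest
  // with movk; otherwise start from movz.
  bool invert_move = CountHalfWords(imm, 0xffff) > CountHalfWords(imm, 0x0000);
  unsigned ignored = invert_move ? 0xffff : 0x0000;

  bool first_mov_done = false;
  for (unsigned i = 0; i < 4; i++) {
    unsigned imm16 = (unsigned)((imm >> (16 * i)) & 0xffff);
    if (imm16 == ignored) continue;  // this halfword comes for free
    if (!first_mov_done) {
      if (invert_move) {
        printf("movn x0, #0x%04x, lsl #%u\n", (~imm16) & 0xffff, 16 * i);
      } else {
        printf("movz x0, #0x%04x, lsl #%u\n", imm16, 16 * i);
      }
      first_mov_done = true;
    } else {
      printf("movk x0, #0x%04x, lsl #%u\n", imm16, 16 * i);
    }
  }
  return 0;
}
```

For this example the output is a single `movn x0, #0xedcb, lsl #0`, since the three 0xffff halfwords come for free from move-inverted.
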
| 244 | 248 |
| 245 | 249 |
| 246 void MacroAssembler::Mov(const Register& rd, | 250 void MacroAssembler::Mov(const Register& rd, |
| 247 const Operand& operand, | 251 const Operand& operand, |
| 248 DiscardMoveMode discard_mode) { | 252 DiscardMoveMode discard_mode) { |
| 249 ASSERT(allow_macro_instructions_); | 253 ASSERT(allow_macro_instructions_); |
| 250 ASSERT(!rd.IsZero()); | 254 ASSERT(!rd.IsZero()); |
| 255 | |
| 251 // Provide a swap register for instructions that need to write into the | 256 // Provide a swap register for instructions that need to write into the |
| 252 // system stack pointer (and can't do this inherently). | 257 // system stack pointer (and can't do this inherently). |
| 253 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd); | 258 UseScratchRegisterScope temps(this); |
| 259 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; | |
| 254 | 260 |
| 255 if (operand.NeedsRelocation()) { | 261 if (operand.NeedsRelocation()) { |
| 256 LoadRelocated(dst, operand); | 262 LoadRelocated(dst, operand); |
| 257 | 263 |
| 258 } else if (operand.IsImmediate()) { | 264 } else if (operand.IsImmediate()) { |
| 259 // Call the macro assembler for generic immediates. | 265 // Call the macro assembler for generic immediates. |
| 260 Mov(dst, operand.immediate()); | 266 Mov(dst, operand.immediate()); |
| 261 | 267 |
| 262 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 268 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 263 // Emit a shift instruction if moving a shifted register. This operation | 269 // Emit a shift instruction if moving a shifted register. This operation |
| (...skipping 20 matching lines...) | |
| 284 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && | 290 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && |
| 285 (discard_mode == kDontDiscardForSameWReg))) { | 291 (discard_mode == kDontDiscardForSameWReg))) { |
| 286 Assembler::mov(rd, operand.reg()); | 292 Assembler::mov(rd, operand.reg()); |
| 287 } | 293 } |
| 288 // This case can handle writes into the system stack pointer directly. | 294 // This case can handle writes into the system stack pointer directly. |
| 289 dst = rd; | 295 dst = rd; |
| 290 } | 296 } |
| 291 | 297 |
| 292 // Copy the result to the system stack pointer. | 298 // Copy the result to the system stack pointer. |
| 293 if (!dst.Is(rd)) { | 299 if (!dst.Is(rd)) { |
| 294 ASSERT(rd.IsZero()); | 300 ASSERT(rd.IsSP()); |
| 295 ASSERT(dst.Is(Tmp1())); | |
| 296 Assembler::mov(rd, dst); | 301 Assembler::mov(rd, dst); |
| 297 } | 302 } |
| 298 } | 303 } |
| 299 | 304 |
| 300 | 305 |
| 301 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 306 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
| 302 ASSERT(allow_macro_instructions_); | 307 ASSERT(allow_macro_instructions_); |
| 303 | 308 |
| 304 if (operand.NeedsRelocation()) { | 309 if (operand.NeedsRelocation()) { |
| 305 LoadRelocated(Tmp0(), operand); | 310 LoadRelocated(rd, operand); |
| 306 Mvn(rd, Tmp0()); | 311 mvn(rd, rd); |
| 307 | 312 |
| 308 } else if (operand.IsImmediate()) { | 313 } else if (operand.IsImmediate()) { |
| 309 // Call the macro assembler for generic immediates. | 314 // Call the macro assembler for generic immediates. |
| 310 Mov(rd, ~operand.immediate()); | 315 Mov(rd, ~operand.immediate()); |
| 311 | 316 |
| 312 } else if (operand.IsExtendedRegister()) { | 317 } else if (operand.IsExtendedRegister()) { |
| 313 // Emit two instructions for the extend case. This differs from Mov, as | 318 // Emit two instructions for the extend case. This differs from Mov, as |
| 314 // the extend and invert can't be achieved in one instruction. | 319 // the extend and invert can't be achieved in one instruction. |
| 315 Register temp = AppropriateTempFor(rd, operand.reg()); | 320 EmitExtendShift(rd, operand.reg(), operand.extend(), |
| 316 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
| 317 operand.shift_amount()); | 321 operand.shift_amount()); |
| 318 mvn(rd, temp); | 322 mvn(rd, rd); |
| 319 | 323 |
| 320 } else { | 324 } else { |
| 321 // Otherwise, emit a register move only if the registers are distinct. | |
| 322 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0. | |
| 323 mvn(rd, operand); | 325 mvn(rd, operand); |
| 324 } | 326 } |
| 325 } | 327 } |
| 326 | 328 |
| 327 | 329 |
| 328 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { | 330 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { |
| 329 ASSERT((reg_size % 8) == 0); | 331 ASSERT((reg_size % 8) == 0); |
| 330 int count = 0; | 332 int count = 0; |
| 331 for (unsigned i = 0; i < (reg_size / 16); i++) { | 333 for (unsigned i = 0; i < (reg_size / 16); i++) { |
| 332 if ((imm & 0xffff) == 0) { | 334 if ((imm & 0xffff) == 0) { |
| (...skipping 20 matching lines...) | |
| 353 } | 355 } |
| 354 | 356 |
| 355 | 357 |
| 356 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | 358 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
| 357 const Operand& operand, | 359 const Operand& operand, |
| 358 StatusFlags nzcv, | 360 StatusFlags nzcv, |
| 359 Condition cond, | 361 Condition cond, |
| 360 ConditionalCompareOp op) { | 362 ConditionalCompareOp op) { |
| 361 ASSERT((cond != al) && (cond != nv)); | 363 ASSERT((cond != al) && (cond != nv)); |
| 362 if (operand.NeedsRelocation()) { | 364 if (operand.NeedsRelocation()) { |
| 363 LoadRelocated(Tmp0(), operand); | 365 UseScratchRegisterScope temps(this); |
| 364 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op); | 366 Register temp = temps.AcquireX(); |
| 367 LoadRelocated(temp, operand); | |
| 368 ConditionalCompareMacro(rn, temp, nzcv, cond, op); | |
| 365 | 369 |
| 366 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | 370 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
| 367 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | 371 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
| 368 // The immediate can be encoded in the instruction, or the operand is an | 372 // The immediate can be encoded in the instruction, or the operand is an |
| 369 // unshifted register: call the assembler. | 373 // unshifted register: call the assembler. |
| 370 ConditionalCompare(rn, operand, nzcv, cond, op); | 374 ConditionalCompare(rn, operand, nzcv, cond, op); |
| 371 | 375 |
| 372 } else { | 376 } else { |
| 373 // The operand isn't directly supported by the instruction: perform the | 377 // The operand isn't directly supported by the instruction: perform the |
| 374 // operation on a temporary register. | 378 // operation on a temporary register. |
| 375 Register temp = AppropriateTempFor(rn); | 379 UseScratchRegisterScope temps(this); |
| 380 Register temp = temps.AcquireSameSizeAs(rn); | |
| 376 Mov(temp, operand); | 381 Mov(temp, operand); |
| 377 ConditionalCompare(rn, temp, nzcv, cond, op); | 382 ConditionalCompare(rn, temp, nzcv, cond, op); |
| 378 } | 383 } |
| 379 } | 384 } |
| 380 | 385 |
| 381 | 386 |
| 382 void MacroAssembler::Csel(const Register& rd, | 387 void MacroAssembler::Csel(const Register& rd, |
| 383 const Register& rn, | 388 const Register& rn, |
| 384 const Operand& operand, | 389 const Operand& operand, |
| 385 Condition cond) { | 390 Condition cond) { |
| 386 ASSERT(allow_macro_instructions_); | 391 ASSERT(allow_macro_instructions_); |
| 387 ASSERT(!rd.IsZero()); | 392 ASSERT(!rd.IsZero()); |
| 388 ASSERT((cond != al) && (cond != nv)); | 393 ASSERT((cond != al) && (cond != nv)); |
| 389 if (operand.IsImmediate()) { | 394 if (operand.IsImmediate()) { |
| 390 // Immediate argument. Handle special cases of 0, 1 and -1 using zero | 395 // Immediate argument. Handle special cases of 0, 1 and -1 using zero |
| 391 // register. | 396 // register. |
| 392 int64_t imm = operand.immediate(); | 397 int64_t imm = operand.immediate(); |
| 393 Register zr = AppropriateZeroRegFor(rn); | 398 Register zr = AppropriateZeroRegFor(rn); |
| 394 if (imm == 0) { | 399 if (imm == 0) { |
| 395 csel(rd, rn, zr, cond); | 400 csel(rd, rn, zr, cond); |
| 396 } else if (imm == 1) { | 401 } else if (imm == 1) { |
| 397 csinc(rd, rn, zr, cond); | 402 csinc(rd, rn, zr, cond); |
| 398 } else if (imm == -1) { | 403 } else if (imm == -1) { |
| 399 csinv(rd, rn, zr, cond); | 404 csinv(rd, rn, zr, cond); |
| 400 } else { | 405 } else { |
| 401 Register temp = AppropriateTempFor(rn); | 406 UseScratchRegisterScope temps(this); |
| 407 Register temp = temps.AcquireSameSizeAs(rn); | |
| 402 Mov(temp, operand.immediate()); | 408 Mov(temp, operand.immediate()); |
| 403 csel(rd, rn, temp, cond); | 409 csel(rd, rn, temp, cond); |
| 404 } | 410 } |
| 405 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { | 411 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { |
| 406 // Unshifted register argument. | 412 // Unshifted register argument. |
| 407 csel(rd, rn, operand.reg(), cond); | 413 csel(rd, rn, operand.reg(), cond); |
| 408 } else { | 414 } else { |
| 409 // All other arguments. | 415 // All other arguments. |
| 410 Register temp = AppropriateTempFor(rn); | 416 UseScratchRegisterScope temps(this); |
| 417 Register temp = temps.AcquireSameSizeAs(rn); | |
| 411 Mov(temp, operand); | 418 Mov(temp, operand); |
| 412 csel(rd, rn, temp, cond); | 419 csel(rd, rn, temp, cond); |
| 413 } | 420 } |
| 414 } | 421 } |
| 415 | 422 |
| 416 | 423 |
| 417 void MacroAssembler::AddSubMacro(const Register& rd, | 424 void MacroAssembler::AddSubMacro(const Register& rd, |
| 418 const Register& rn, | 425 const Register& rn, |
| 419 const Operand& operand, | 426 const Operand& operand, |
| 420 FlagsUpdate S, | 427 FlagsUpdate S, |
| 421 AddSubOp op) { | 428 AddSubOp op) { |
| 422 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 429 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
| 423 !operand.NeedsRelocation() && (S == LeaveFlags)) { | 430 !operand.NeedsRelocation() && (S == LeaveFlags)) { |
| 424 // The instruction would be a nop. Avoid generating useless code. | 431 // The instruction would be a nop. Avoid generating useless code. |
| 425 return; | 432 return; |
| 426 } | 433 } |
| 427 | 434 |
| 428 if (operand.NeedsRelocation()) { | 435 if (operand.NeedsRelocation()) { |
| 429 LoadRelocated(Tmp0(), operand); | 436 UseScratchRegisterScope temps(this); |
| 430 AddSubMacro(rd, rn, Tmp0(), S, op); | 437 Register temp = temps.AcquireX(); |
| 438 LoadRelocated(temp, operand); | |
| 439 AddSubMacro(rd, rn, temp, S, op); | |
| 431 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | 440 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
| 432 (rn.IsZero() && !operand.IsShiftedRegister()) || | 441 (rn.IsZero() && !operand.IsShiftedRegister()) || |
| 433 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 442 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 434 Register temp = AppropriateTempFor(rn); | 443 UseScratchRegisterScope temps(this); |
| 444 Register temp = temps.AcquireSameSizeAs(rn); | |
| 435 Mov(temp, operand); | 445 Mov(temp, operand); |
| 436 AddSub(rd, rn, temp, S, op); | 446 AddSub(rd, rn, temp, S, op); |
| 437 } else { | 447 } else { |
| 438 AddSub(rd, rn, operand, S, op); | 448 AddSub(rd, rn, operand, S, op); |
| 439 } | 449 } |
| 440 } | 450 } |
| 441 | 451 |
| 442 | 452 |
| 443 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 453 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
| 444 const Register& rn, | 454 const Register& rn, |
| 445 const Operand& operand, | 455 const Operand& operand, |
| 446 FlagsUpdate S, | 456 FlagsUpdate S, |
| 447 AddSubWithCarryOp op) { | 457 AddSubWithCarryOp op) { |
| 448 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 458 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 459 UseScratchRegisterScope temps(this); | |
| 449 | 460 |
| 450 if (operand.NeedsRelocation()) { | 461 if (operand.NeedsRelocation()) { |
| 451 LoadRelocated(Tmp0(), operand); | 462 Register temp = temps.AcquireX(); |
| 452 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op); | 463 LoadRelocated(temp, operand); |
| 464 AddSubWithCarryMacro(rd, rn, temp, S, op); | |
| 453 | 465 |
| 454 } else if (operand.IsImmediate() || | 466 } else if (operand.IsImmediate() || |
| 455 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 467 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 456 // Add/sub with carry (immediate or ROR shifted register.) | 468 // Add/sub with carry (immediate or ROR shifted register.) |
| 457 Register temp = AppropriateTempFor(rn); | 469 Register temp = temps.AcquireSameSizeAs(rn); |
| 458 Mov(temp, operand); | 470 Mov(temp, operand); |
| 459 AddSubWithCarry(rd, rn, temp, S, op); | 471 AddSubWithCarry(rd, rn, temp, S, op); |
| 472 | |
| 460 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 473 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 461 // Add/sub with carry (shifted register). | 474 // Add/sub with carry (shifted register). |
| 462 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 475 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 463 ASSERT(operand.shift() != ROR); | 476 ASSERT(operand.shift() != ROR); |
| 464 ASSERT(is_uintn(operand.shift_amount(), | 477 ASSERT( |
| 465 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); | 478 is_uintn(operand.shift_amount(), |
| 466 Register temp = AppropriateTempFor(rn, operand.reg()); | 479 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); |
| 480 Register temp = temps.AcquireSameSizeAs(rn); | |
| 467 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); | 481 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); |
| 468 AddSubWithCarry(rd, rn, temp, S, op); | 482 AddSubWithCarry(rd, rn, temp, S, op); |
| 469 | 483 |
| 470 } else if (operand.IsExtendedRegister()) { | 484 } else if (operand.IsExtendedRegister()) { |
| 471 // Add/sub with carry (extended register). | 485 // Add/sub with carry (extended register). |
| 472 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 486 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 473 // Add/sub extended supports a shift <= 4. We want to support exactly the | 487 // Add/sub extended supports a shift <= 4. We want to support exactly the |
| 474 // same modes. | 488 // same modes. |
| 475 ASSERT(operand.shift_amount() <= 4); | 489 ASSERT(operand.shift_amount() <= 4); |
| 476 ASSERT(operand.reg().Is64Bits() || | 490 ASSERT(operand.reg().Is64Bits() || |
| 477 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 491 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
| 478 Register temp = AppropriateTempFor(rn, operand.reg()); | 492 Register temp = temps.AcquireSameSizeAs(rn); |
| 479 EmitExtendShift(temp, operand.reg(), operand.extend(), | 493 EmitExtendShift(temp, operand.reg(), operand.extend(), |
| 480 operand.shift_amount()); | 494 operand.shift_amount()); |
| 481 AddSubWithCarry(rd, rn, temp, S, op); | 495 AddSubWithCarry(rd, rn, temp, S, op); |
| 482 | 496 |
| 483 } else { | 497 } else { |
| 484 // The addressing mode is directly supported by the instruction. | 498 // The addressing mode is directly supported by the instruction. |
| 485 AddSubWithCarry(rd, rn, operand, S, op); | 499 AddSubWithCarry(rd, rn, operand, S, op); |
| 486 } | 500 } |
| 487 } | 501 } |
| 488 | 502 |
| 489 | 503 |
| 490 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, | 504 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, |
| 491 const MemOperand& addr, | 505 const MemOperand& addr, |
| 492 LoadStoreOp op) { | 506 LoadStoreOp op) { |
| 493 int64_t offset = addr.offset(); | 507 int64_t offset = addr.offset(); |
| 494 LSDataSize size = CalcLSDataSize(op); | 508 LSDataSize size = CalcLSDataSize(op); |
| 495 | 509 |
| 496 // Check if an immediate offset fits in the immediate field of the | 510 // Check if an immediate offset fits in the immediate field of the |
| 497 // appropriate instruction. If not, emit two instructions to perform | 511 // appropriate instruction. If not, emit two instructions to perform |
| 498 // the operation. | 512 // the operation. |
| 499 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && | 513 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && |
| 500 !IsImmLSUnscaled(offset)) { | 514 !IsImmLSUnscaled(offset)) { |
| 501 // Immediate offset that can't be encoded using unsigned or unscaled | 515 // Immediate offset that can't be encoded using unsigned or unscaled |
| 502 // addressing modes. | 516 // addressing modes. |
| 503 Register temp = AppropriateTempFor(addr.base()); | 517 UseScratchRegisterScope temps(this); |
| 518 Register temp = temps.AcquireSameSizeAs(addr.base()); | |
| 504 Mov(temp, addr.offset()); | 519 Mov(temp, addr.offset()); |
| 505 LoadStore(rt, MemOperand(addr.base(), temp), op); | 520 LoadStore(rt, MemOperand(addr.base(), temp), op); |
| 506 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { | 521 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { |
| 507 // Post-index beyond unscaled addressing range. | 522 // Post-index beyond unscaled addressing range. |
| 508 LoadStore(rt, MemOperand(addr.base()), op); | 523 LoadStore(rt, MemOperand(addr.base()), op); |
| 509 add(addr.base(), addr.base(), offset); | 524 add(addr.base(), addr.base(), offset); |
| 510 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { | 525 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { |
| 511 // Pre-index beyond unscaled addressing range. | 526 // Pre-index beyond unscaled addressing range. |
| 512 add(addr.base(), addr.base(), offset); | 527 add(addr.base(), addr.base(), offset); |
| 513 LoadStore(rt, MemOperand(addr.base()), op); | 528 LoadStore(rt, MemOperand(addr.base()), op); |
| (...skipping 422 matching lines...) | |
| 936 } | 951 } |
| 937 } | 952 } |
| 938 | 953 |
| 939 | 954 |
| 940 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | 955 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { |
| 941 int size = src.SizeInBytes(); | 956 int size = src.SizeInBytes(); |
| 942 | 957 |
| 943 PrepareForPush(count, size); | 958 PrepareForPush(count, size); |
| 944 | 959 |
| 945 if (FLAG_optimize_for_size && count > 8) { | 960 if (FLAG_optimize_for_size && count > 8) { |
| 961 UseScratchRegisterScope temps(this); | |
| 962 Register temp = temps.AcquireX(); | |
| 963 | |
| 946 Label loop; | 964 Label loop; |
| 947 __ Mov(Tmp0(), count / 2); | 965 __ Mov(temp, count / 2); |
| 948 __ Bind(&loop); | 966 __ Bind(&loop); |
| 949 PushHelper(2, size, src, src, NoReg, NoReg); | 967 PushHelper(2, size, src, src, NoReg, NoReg); |
| 950 __ Subs(Tmp0(), Tmp0(), 1); | 968 __ Subs(temp, temp, 1); |
| 951 __ B(ne, &loop); | 969 __ B(ne, &loop); |
| 952 | 970 |
| 953 count %= 2; | 971 count %= 2; |
| 954 } | 972 } |
| 955 | 973 |
| 956 // Push up to four registers at a time if possible because if the current | 974 // Push up to four registers at a time if possible because if the current |
| 957 // stack pointer is csp and the register size is 32, registers must be pushed | 975 // stack pointer is csp and the register size is 32, registers must be pushed |
| 958 // in blocks of four in order to maintain the 16-byte alignment for csp. | 976 // in blocks of four in order to maintain the 16-byte alignment for csp. |
| 959 while (count >= 4) { | 977 while (count >= 4) { |
| 960 PushHelper(4, size, src, src, src, src); | 978 PushHelper(4, size, src, src, src, src); |
| 961 count -= 4; | 979 count -= 4; |
| 962 } | 980 } |
| 963 if (count >= 2) { | 981 if (count >= 2) { |
| 964 PushHelper(2, size, src, src, NoReg, NoReg); | 982 PushHelper(2, size, src, src, NoReg, NoReg); |
| 965 count -= 2; | 983 count -= 2; |
| 966 } | 984 } |
| 967 if (count == 1) { | 985 if (count == 1) { |
| 968 PushHelper(1, size, src, NoReg, NoReg, NoReg); | 986 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
| 969 count -= 1; | 987 count -= 1; |
| 970 } | 988 } |
| 971 ASSERT(count == 0); | 989 ASSERT(count == 0); |
| 972 } | 990 } |
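
For reference, the block decomposition used by PushMultipleTimes (an optional size-optimized pair loop, then straight-line blocks of 4, 2 and 1) can be modelled directly; PushPlan and its encoding of the loop as a negative entry are inventions of this sketch:

```cpp
#include <cstdio>
#include <vector>

// Returns the sequence of PushHelper block sizes PushMultipleTimes would use.
// A negative entry -N stands for "a loop that pushes N pairs" (the
// FLAG_optimize_for_size path); positive entries are straight-line pushes of
// 4, 2 or 1 registers, which keeps csp 16-byte aligned for W registers.
static std::vector<int> PushPlan(int count, bool optimize_for_size) {
  std::vector<int> blocks;
  if (optimize_for_size && count > 8) {
    blocks.push_back(-(count / 2));
    count %= 2;
  }
  while (count >= 4) { blocks.push_back(4); count -= 4; }
  if (count >= 2)    { blocks.push_back(2); count -= 2; }
  if (count == 1)    { blocks.push_back(1); count -= 1; }
  return blocks;
}

int main() {
  for (int b : PushPlan(11, true))  printf("%d ", b);  // -5 1
  printf("\n");
  for (int b : PushPlan(11, false)) printf("%d ", b);  // 4 4 2 1
  printf("\n");
  return 0;
}
```
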
| 973 | 991 |
| 974 | 992 |
| 975 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | 993 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { |
| 976 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | 994 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); |
| 977 | 995 |
| 978 Register temp = AppropriateTempFor(count); | 996 UseScratchRegisterScope temps(this); |
| 997 Register temp = temps.AcquireSameSizeAs(count); | |
| 979 | 998 |
| 980 if (FLAG_optimize_for_size) { | 999 if (FLAG_optimize_for_size) { |
| 981 Label loop, done; | 1000 Label loop, done; |
| 982 | 1001 |
| 983 Subs(temp, count, 1); | 1002 Subs(temp, count, 1); |
| 984 B(mi, &done); | 1003 B(mi, &done); |
| 985 | 1004 |
| 986 // Push all registers individually, to save code size. | 1005 // Push all registers individually, to save code size. |
| 987 Bind(&loop); | 1006 Bind(&loop); |
| 988 Subs(temp, temp, 1); | 1007 Subs(temp, temp, 1); |
| (...skipping 423 matching lines...) | |
| 1412 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); | 1431 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); |
| 1413 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); | 1432 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); |
| 1414 Br(scratch1); | 1433 Br(scratch1); |
| 1415 } | 1434 } |
| 1416 | 1435 |
| 1417 | 1436 |
| 1418 void MacroAssembler::InNewSpace(Register object, | 1437 void MacroAssembler::InNewSpace(Register object, |
| 1419 Condition cond, | 1438 Condition cond, |
| 1420 Label* branch) { | 1439 Label* branch) { |
| 1421 ASSERT(cond == eq || cond == ne); | 1440 ASSERT(cond == eq || cond == ne); |
| 1422 // Use Tmp1() to have a different destination register, as Tmp0() will be used | 1441 UseScratchRegisterScope temps(this); |
| 1423 // for relocation. | 1442 Register temp = temps.AcquireX(); |
| 1424 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate()))); | 1443 And(temp, object, Operand(ExternalReference::new_space_mask(isolate()))); |
| 1425 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate()))); | 1444 Cmp(temp, Operand(ExternalReference::new_space_start(isolate()))); |
| 1426 B(cond, branch); | 1445 B(cond, branch); |
| 1427 } | 1446 } |
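
The And/Cmp pair in InNewSpace is a masked range check. A tiny model with made-up mask and base constants; the real values come from ExternalReference::new_space_mask and new_space_start:

```cpp
#include <cassert>
#include <cstdint>

// An object is "in new space" iff (address & mask) == start.
static bool InNewSpace(uint64_t object, uint64_t mask, uint64_t start) {
  return (object & mask) == start;
}

int main() {
  const uint64_t start = UINT64_C(0x40000000);     // hypothetical base
  const uint64_t mask = ~UINT64_C(0x0fffffff);     // hypothetical 256MB region
  assert(InNewSpace(start + 0x1234, mask, start));
  assert(!InNewSpace(UINT64_C(0x80001234), mask, start));
  return 0;
}
```
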
| 1428 | 1447 |
| 1429 | 1448 |
| 1430 void MacroAssembler::Throw(Register value, | 1449 void MacroAssembler::Throw(Register value, |
| 1431 Register scratch1, | 1450 Register scratch1, |
| 1432 Register scratch2, | 1451 Register scratch2, |
| 1433 Register scratch3, | 1452 Register scratch3, |
| 1434 Register scratch4) { | 1453 Register scratch4) { |
| 1435 // Adjust this code if not the case. | 1454 // Adjust this code if not the case. |
| (...skipping 148 matching lines...) | |
| 1584 | 1603 |
| 1585 void MacroAssembler::AssertName(Register object) { | 1604 void MacroAssembler::AssertName(Register object) { |
| 1586 if (emit_debug_code()) { | 1605 if (emit_debug_code()) { |
| 1587 STATIC_ASSERT(kSmiTag == 0); | 1606 STATIC_ASSERT(kSmiTag == 0); |
| 1588 // TODO(jbramley): Add AbortIfSmi and related functions. | 1607 // TODO(jbramley): Add AbortIfSmi and related functions. |
| 1589 Label not_smi; | 1608 Label not_smi; |
| 1590 JumpIfNotSmi(object, ¬_smi); | 1609 JumpIfNotSmi(object, ¬_smi); |
| 1591 Abort(kOperandIsASmiAndNotAName); | 1610 Abort(kOperandIsASmiAndNotAName); |
| 1592 Bind(¬_smi); | 1611 Bind(¬_smi); |
| 1593 | 1612 |
| 1594 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset)); | 1613 UseScratchRegisterScope temps(this); |
| 1595 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE); | 1614 Register temp = temps.AcquireX(); |
| 1615 | |
| 1616 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 1617 CompareInstanceType(temp, temp, LAST_NAME_TYPE); | |
| 1596 Check(ls, kOperandIsNotAName); | 1618 Check(ls, kOperandIsNotAName); |
| 1597 } | 1619 } |
| 1598 } | 1620 } |
| 1599 | 1621 |
| 1600 | 1622 |
| 1601 void MacroAssembler::AssertString(Register object) { | 1623 void MacroAssembler::AssertString(Register object) { |
| 1602 if (emit_debug_code()) { | 1624 if (emit_debug_code()) { |
| 1603 Register temp = Tmp1(); | 1625 UseScratchRegisterScope temps(this); |
| 1626 Register temp = temps.AcquireX(); | |
| 1604 STATIC_ASSERT(kSmiTag == 0); | 1627 STATIC_ASSERT(kSmiTag == 0); |
| 1605 Tst(object, kSmiTagMask); | 1628 Tst(object, kSmiTagMask); |
| 1606 Check(ne, kOperandIsASmiAndNotAString); | 1629 Check(ne, kOperandIsASmiAndNotAString); |
| 1607 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 1630 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1608 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 1631 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 1609 Check(lo, kOperandIsNotAString); | 1632 Check(lo, kOperandIsNotAString); |
| 1610 } | 1633 } |
| 1611 } | 1634 } |
| 1612 | 1635 |
| 1613 | 1636 |
| (...skipping 296 matching lines...) | |
| 1910 | 1933 |
| 1911 void MacroAssembler::CallCFunction(ExternalReference function, | 1934 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1912 int num_of_reg_args) { | 1935 int num_of_reg_args) { |
| 1913 CallCFunction(function, num_of_reg_args, 0); | 1936 CallCFunction(function, num_of_reg_args, 0); |
| 1914 } | 1937 } |
| 1915 | 1938 |
| 1916 | 1939 |
| 1917 void MacroAssembler::CallCFunction(ExternalReference function, | 1940 void MacroAssembler::CallCFunction(ExternalReference function, |
| 1918 int num_of_reg_args, | 1941 int num_of_reg_args, |
| 1919 int num_of_double_args) { | 1942 int num_of_double_args) { |
| 1920 Mov(Tmp0(), Operand(function)); | 1943 UseScratchRegisterScope temps(this); |
| 1921 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args); | 1944 Register temp = temps.AcquireX(); |
| 1945 Mov(temp, Operand(function)); | |
| 1946 CallCFunction(temp, num_of_reg_args, num_of_double_args); | |
| 1922 } | 1947 } |
| 1923 | 1948 |
| 1924 | 1949 |
| 1925 void MacroAssembler::CallCFunction(Register function, | 1950 void MacroAssembler::CallCFunction(Register function, |
| 1926 int num_of_reg_args, | 1951 int num_of_reg_args, |
| 1927 int num_of_double_args) { | 1952 int num_of_double_args) { |
| 1928 ASSERT(has_frame()); | 1953 ASSERT(has_frame()); |
| 1929 // We can pass 8 integer arguments in registers. If we need to pass more than | 1954 // We can pass 8 integer arguments in registers. If we need to pass more than |
| 1930 // that, we'll need to implement support for passing them on the stack. | 1955 // that, we'll need to implement support for passing them on the stack. |
| 1931 ASSERT(num_of_reg_args <= 8); | 1956 ASSERT(num_of_reg_args <= 8); |
| (...skipping 32 matching lines...) | |
| 1964 | 1989 |
| 1965 // Call directly. The function called cannot cause a GC, or allow preemption, | 1990 // Call directly. The function called cannot cause a GC, or allow preemption, |
| 1966 // so the return address in the link register stays correct. | 1991 // so the return address in the link register stays correct. |
| 1967 Call(function); | 1992 Call(function); |
| 1968 | 1993 |
| 1969 if (!csp.Is(old_stack_pointer)) { | 1994 if (!csp.Is(old_stack_pointer)) { |
| 1970 if (emit_debug_code()) { | 1995 if (emit_debug_code()) { |
| 1971 // Because the stack pointer must be aligned on a 16-byte boundary, the | 1996 // Because the stack pointer must be aligned on a 16-byte boundary, the |
| 1972 // aligned csp can be up to 12 bytes below the jssp. This is the case | 1997 // aligned csp can be up to 12 bytes below the jssp. This is the case |
| 1973 // where we only pushed one W register on top of an aligned jssp. | 1998 // where we only pushed one W register on top of an aligned jssp. |
| 1974 Register temp = Tmp1(); | 1999 UseScratchRegisterScope temps(this); |
| 2000 Register temp = temps.AcquireX(); | |
| 1975 ASSERT(ActivationFrameAlignment() == 16); | 2001 ASSERT(ActivationFrameAlignment() == 16); |
| 1976 Sub(temp, csp, old_stack_pointer); | 2002 Sub(temp, csp, old_stack_pointer); |
| 1977 // We want temp <= 0 && temp >= -12. | 2003 // We want temp <= 0 && temp >= -12. |
| 1978 Cmp(temp, 0); | 2004 Cmp(temp, 0); |
| 1979 Ccmp(temp, -12, NFlag, le); | 2005 Ccmp(temp, -12, NFlag, le); |
| 1980 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); | 2006 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); |
| 1981 } | 2007 } |
| 1982 SetStackPointer(old_stack_pointer); | 2008 SetStackPointer(old_stack_pointer); |
| 1983 } | 2009 } |
| 1984 } | 2010 } |
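
The debug-only check at the end of CallCFunction asserts that csp trails jssp by at most 12 bytes; the Cmp/Ccmp pair folds the two comparisons into a single conditional Check. A scalar model of the accepted window, with the variable name invented for the sketch:

```cpp
#include <cassert>

// delta = csp - old_stack_pointer. With 16-byte alignment and at most one W
// register pushed on top of an aligned jssp, the padding is at most 12 bytes.
static bool StackDeltaOk(long long delta) {
  return delta <= 0 && delta >= -12;
}

int main() {
  assert(StackDeltaOk(0));
  assert(StackDeltaOk(-4) && StackDeltaOk(-12));
  assert(!StackDeltaOk(-16));  // more than one slot of padding is a bug
  assert(!StackDeltaOk(8));    // csp above jssp is a bug
  return 0;
}
```
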
| 1985 | 2011 |
| 1986 | 2012 |
| 1987 void MacroAssembler::Jump(Register target) { | 2013 void MacroAssembler::Jump(Register target) { |
| 1988 Br(target); | 2014 Br(target); |
| 1989 } | 2015 } |
| 1990 | 2016 |
| 1991 | 2017 |
| 1992 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { | 2018 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { |
| 1993 Mov(Tmp0(), Operand(target, rmode)); | 2019 UseScratchRegisterScope temps(this); |
| 1994 Br(Tmp0()); | 2020 Register temp = temps.AcquireX(); |
| 2021 Mov(temp, Operand(target, rmode)); | |
| 2022 Br(temp); | |
| 1995 } | 2023 } |
| 1996 | 2024 |
| 1997 | 2025 |
| 1998 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { | 2026 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { |
| 1999 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 2027 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 2000 Jump(reinterpret_cast<intptr_t>(target), rmode); | 2028 Jump(reinterpret_cast<intptr_t>(target), rmode); |
| 2001 } | 2029 } |
| 2002 | 2030 |
| 2003 | 2031 |
| 2004 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { | 2032 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { |
| (...skipping 41 matching lines...) | |
| 2046 Label start_call; | 2074 Label start_call; |
| 2047 Bind(&start_call); | 2075 Bind(&start_call); |
| 2048 #endif | 2076 #endif |
| 2049 // Statement positions are expected to be recorded when the target | 2077 // Statement positions are expected to be recorded when the target |
| 2050 // address is loaded. | 2078 // address is loaded. |
| 2051 positions_recorder()->WriteRecordedPositions(); | 2079 positions_recorder()->WriteRecordedPositions(); |
| 2052 | 2080 |
| 2053 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | 2081 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
| 2054 ASSERT(rmode != RelocInfo::NONE32); | 2082 ASSERT(rmode != RelocInfo::NONE32); |
| 2055 | 2083 |
| 2084 UseScratchRegisterScope temps(this); | |
| 2085 Register temp = temps.AcquireX(); | |
| 2086 | |
| 2056 if (rmode == RelocInfo::NONE64) { | 2087 if (rmode == RelocInfo::NONE64) { |
| 2057 uint64_t imm = reinterpret_cast<uint64_t>(target); | 2088 uint64_t imm = reinterpret_cast<uint64_t>(target); |
| 2058 movz(Tmp0(), (imm >> 0) & 0xffff, 0); | 2089 movz(temp, (imm >> 0) & 0xffff, 0); |
| 2059 movk(Tmp0(), (imm >> 16) & 0xffff, 16); | 2090 movk(temp, (imm >> 16) & 0xffff, 16); |
| 2060 movk(Tmp0(), (imm >> 32) & 0xffff, 32); | 2091 movk(temp, (imm >> 32) & 0xffff, 32); |
| 2061 movk(Tmp0(), (imm >> 48) & 0xffff, 48); | 2092 movk(temp, (imm >> 48) & 0xffff, 48); |
| 2062 } else { | 2093 } else { |
| 2063 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode)); | 2094 LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
| 2064 } | 2095 } |
| 2065 Blr(Tmp0()); | 2096 Blr(temp); |
| 2066 #ifdef DEBUG | 2097 #ifdef DEBUG |
| 2067 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); | 2098 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); |
| 2068 #endif | 2099 #endif |
| 2069 } | 2100 } |
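
In the RelocInfo::NONE64 branch above, the 64-bit call target is materialized 16 bits at a time with one movz followed by three movk. A stand-alone sketch of that split, using a made-up address and printing the instruction sequence it would produce:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Made-up 64-bit call target; the real code uses `target` after the
  // RelocInfo::NONE64 check.
  uint64_t imm = 0x123456789abcdef0ULL;
  for (int shift = 0; shift < 64; shift += 16) {
    unsigned chunk = (unsigned)((imm >> shift) & 0xffff);
    printf("%s temp, #0x%04x, lsl #%d\n", (shift == 0) ? "movz" : "movk",
           chunk, shift);
  }
  return 0;
}
```
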
| 2070 | 2101 |
| 2071 | 2102 |
| 2072 void MacroAssembler::Call(Handle<Code> code, | 2103 void MacroAssembler::Call(Handle<Code> code, |
| 2073 RelocInfo::Mode rmode, | 2104 RelocInfo::Mode rmode, |
| 2074 TypeFeedbackId ast_id) { | 2105 TypeFeedbackId ast_id) { |
| 2075 #ifdef DEBUG | 2106 #ifdef DEBUG |
| (...skipping 60 matching lines...) | |
| 2136 | 2167 |
| 2137 | 2168 |
| 2138 | 2169 |
| 2139 | 2170 |
| 2140 | 2171 |
| 2141 void MacroAssembler::JumpForHeapNumber(Register object, | 2172 void MacroAssembler::JumpForHeapNumber(Register object, |
| 2142 Register heap_number_map, | 2173 Register heap_number_map, |
| 2143 Label* on_heap_number, | 2174 Label* on_heap_number, |
| 2144 Label* on_not_heap_number) { | 2175 Label* on_not_heap_number) { |
| 2145 ASSERT(on_heap_number || on_not_heap_number); | 2176 ASSERT(on_heap_number || on_not_heap_number); |
| 2146 // Tmp0() is used as a scratch register. | |
| 2147 ASSERT(!AreAliased(Tmp0(), heap_number_map)); | |
| 2148 AssertNotSmi(object); | 2177 AssertNotSmi(object); |
| 2149 | 2178 |
| 2179 UseScratchRegisterScope temps(this); | |
| 2180 Register temp = temps.AcquireX(); | |
| 2181 | |
| 2150 // Load the HeapNumber map if it is not passed. | 2182 // Load the HeapNumber map if it is not passed. |
| 2151 if (heap_number_map.Is(NoReg)) { | 2183 if (heap_number_map.Is(NoReg)) { |
| 2152 heap_number_map = Tmp1(); | 2184 heap_number_map = temps.AcquireX(); |
| 2153 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2185 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2154 } else { | 2186 } else { |
| 2155 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map. | |
| 2156 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2187 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2157 } | 2188 } |
| 2158 | 2189 |
| 2159 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 2190 ASSERT(!AreAliased(temp, heap_number_map)); |
| 2160 Cmp(Tmp0(), heap_number_map); | 2191 |
| 2192 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 2193 Cmp(temp, heap_number_map); | |
| 2161 | 2194 |
| 2162 if (on_heap_number) { | 2195 if (on_heap_number) { |
| 2163 B(eq, on_heap_number); | 2196 B(eq, on_heap_number); |
| 2164 } | 2197 } |
| 2165 if (on_not_heap_number) { | 2198 if (on_not_heap_number) { |
| 2166 B(ne, on_not_heap_number); | 2199 B(ne, on_not_heap_number); |
| 2167 } | 2200 } |
| 2168 } | 2201 } |
| 2169 | 2202 |
| 2170 | 2203 |
| (...skipping 105 matching lines...) | |
| 2276 B(on_successful_conversion, eq); | 2309 B(on_successful_conversion, eq); |
| 2277 } | 2310 } |
| 2278 if (on_failed_conversion) { | 2311 if (on_failed_conversion) { |
| 2279 B(on_failed_conversion, ne); | 2312 B(on_failed_conversion, ne); |
| 2280 } | 2313 } |
| 2281 } | 2314 } |
| 2282 | 2315 |
| 2283 | 2316 |
| 2284 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, | 2317 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, |
| 2285 Label* on_negative_zero) { | 2318 Label* on_negative_zero) { |
| 2319 UseScratchRegisterScope temps(this); | |
| 2320 Register temp = temps.AcquireX(); | |
| 2286 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will | 2321 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will |
| 2287 // cause overflow. | 2322 // cause overflow. |
| 2288 Fmov(Tmp0(), input); | 2323 Fmov(temp, input); |
| 2289 Cmp(Tmp0(), 1); | 2324 Cmp(temp, 1); |
| 2290 B(vs, on_negative_zero); | 2325 B(vs, on_negative_zero); |
| 2291 } | 2326 } |
| 2292 | 2327 |
| 2293 | 2328 |
| 2294 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { | 2329 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { |
| 2295 // Clamp the value to [0..255]. | 2330 // Clamp the value to [0..255]. |
| 2296 Cmp(input.W(), Operand(input.W(), UXTB)); | 2331 Cmp(input.W(), Operand(input.W(), UXTB)); |
| 2297 // If input < input & 0xff, it must be < 0, so saturate to 0. | 2332 // If input < input & 0xff, it must be < 0, so saturate to 0. |
| 2298 Csel(output.W(), wzr, input.W(), lt); | 2333 Csel(output.W(), wzr, input.W(), lt); |
| 2299 // Create a constant 0xff. | 2334 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255. |
| 2300 Mov(WTmp0(), 255); | 2335 Csel(output.W(), output.W(), 255, le); |
| rmcilroy (2014/02/24 11:04:49): Could you pull out the behaviour change (changing | |
| jbramley (2014/02/24 11:59:27): The behaviour is the same; I swapped the arguments | |
| 2301 // If input > input & 0xff, it must be > 255, so saturate to 255. | |
| 2302 Csel(output.W(), WTmp0(), output.W(), gt); | |
| 2303 } | 2336 } |
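
Regarding the inline exchange above: the old and new Csel forms select on complementary conditions (gt vs le) from the same Cmp, so the clamp behaviour is unchanged. A scalar model of both sequences that asserts they agree, with signed C++ comparisons standing in for the signed condition codes:

```cpp
#include <cassert>
#include <cstdint>

// Old: Csel(out, 255, out, gt)  -> out = (in >  (in & 0xff)) ? 255 : out
// New: Csel(out, out, 255, le)  -> out = (in <= (in & 0xff)) ? out : 255
static int32_t ClampOld(int32_t in) {
  int32_t out = (in < (in & 0xff)) ? 0 : in;   // saturate negatives to 0
  return (in > (in & 0xff)) ? 255 : out;       // old operand order, gt
}

static int32_t ClampNew(int32_t in) {
  int32_t out = (in < (in & 0xff)) ? 0 : in;   // saturate negatives to 0
  return (in <= (in & 0xff)) ? out : 255;      // new operand order, le
}

int main() {
  for (int64_t v = -100000; v <= 100000; ++v) {
    int32_t in = (int32_t)v;
    assert(ClampOld(in) == ClampNew(in));
    assert(ClampNew(in) >= 0 && ClampNew(in) <= 255);
  }
  return 0;
}
```
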
| 2304 | 2337 |
| 2305 | 2338 |
| 2306 void MacroAssembler::ClampInt32ToUint8(Register in_out) { | 2339 void MacroAssembler::ClampInt32ToUint8(Register in_out) { |
| 2307 ClampInt32ToUint8(in_out, in_out); | 2340 ClampInt32ToUint8(in_out, in_out); |
| 2308 } | 2341 } |
| 2309 | 2342 |
| 2310 | 2343 |
| 2311 void MacroAssembler::ClampDoubleToUint8(Register output, | 2344 void MacroAssembler::ClampDoubleToUint8(Register output, |
| 2312 DoubleRegister input, | 2345 DoubleRegister input, |
| (...skipping 13 matching lines...) | |
| 2326 // Values greater than 255 have already been clamped to 255. | 2359 // Values greater than 255 have already been clamped to 255. |
| 2327 Fcvtnu(output, dbl_scratch); | 2360 Fcvtnu(output, dbl_scratch); |
| 2328 } | 2361 } |
| 2329 | 2362 |
| 2330 | 2363 |
| 2331 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, | 2364 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, |
| 2332 Register src, | 2365 Register src, |
| 2333 unsigned count, | 2366 unsigned count, |
| 2334 Register scratch1, | 2367 Register scratch1, |
| 2335 Register scratch2, | 2368 Register scratch2, |
| 2336 Register scratch3) { | 2369 Register scratch3, |
| 2370 Register scratch4, | |
| 2371 Register scratch5) { | |
| 2337 // Untag src and dst into scratch registers. | 2372 // Untag src and dst into scratch registers. |
| 2338 // Copy src->dst in a tight loop. | 2373 // Copy src->dst in a tight loop. |
| 2339 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1())); | 2374 ASSERT(!AreAliased(dst, src, |
| 2375 scratch1, scratch2, scratch3, scratch4, scratch5)); | |
| 2340 ASSERT(count >= 2); | 2376 ASSERT(count >= 2); |
| 2341 | 2377 |
| 2342 const Register& remaining = scratch3; | 2378 const Register& remaining = scratch3; |
| 2343 Mov(remaining, count / 2); | 2379 Mov(remaining, count / 2); |
| 2344 | 2380 |
| 2345 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2346 InstructionAccurateScope scope(this); | |
| 2347 | |
| 2348 const Register& dst_untagged = scratch1; | 2381 const Register& dst_untagged = scratch1; |
| 2349 const Register& src_untagged = scratch2; | 2382 const Register& src_untagged = scratch2; |
| 2350 sub(dst_untagged, dst, kHeapObjectTag); | 2383 Sub(dst_untagged, dst, kHeapObjectTag); |
| 2351 sub(src_untagged, src, kHeapObjectTag); | 2384 Sub(src_untagged, src, kHeapObjectTag); |
| 2352 | 2385 |
| 2353 // Copy fields in pairs. | 2386 // Copy fields in pairs. |
| 2354 Label loop; | 2387 Label loop; |
| 2355 bind(&loop); | 2388 Bind(&loop); |
| 2356 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2389 Ldp(scratch4, scratch5, |
| 2357 PostIndex)); | 2390 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); |
| 2358 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | 2391 Stp(scratch4, scratch5, |
| 2359 PostIndex)); | 2392 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); |
| 2360 sub(remaining, remaining, 1); | 2393 Sub(remaining, remaining, 1); |
| 2361 cbnz(remaining, &loop); | 2394 Cbnz(remaining, &loop); |
| 2362 | 2395 |
| 2363 // Handle the leftovers. | 2396 // Handle the leftovers. |
| 2364 if (count & 1) { | 2397 if (count & 1) { |
| 2365 ldr(Tmp0(), MemOperand(src_untagged)); | 2398 Ldr(scratch4, MemOperand(src_untagged)); |
| 2366 str(Tmp0(), MemOperand(dst_untagged)); | 2399 Str(scratch4, MemOperand(dst_untagged)); |
| 2367 } | 2400 } |
| 2368 } | 2401 } |
| 2369 | 2402 |
| 2370 | 2403 |
| 2371 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | 2404 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, |
| 2372 Register src, | 2405 Register src, |
| 2373 unsigned count, | 2406 unsigned count, |
| 2374 Register scratch1, | 2407 Register scratch1, |
| 2375 Register scratch2) { | 2408 Register scratch2, |
| 2409 Register scratch3, | |
| 2410 Register scratch4) { | |
| 2376 // Untag src and dst into scratch registers. | 2411 // Untag src and dst into scratch registers. |
| 2377 // Copy src->dst in an unrolled loop. | 2412 // Copy src->dst in an unrolled loop. |
| 2378 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1())); | 2413 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); |
| 2379 | |
| 2380 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2381 InstructionAccurateScope scope(this); | |
| 2382 | 2414 |
| 2383 const Register& dst_untagged = scratch1; | 2415 const Register& dst_untagged = scratch1; |
| 2384 const Register& src_untagged = scratch2; | 2416 const Register& src_untagged = scratch2; |
| 2385 sub(dst_untagged, dst, kHeapObjectTag); | 2417 sub(dst_untagged, dst, kHeapObjectTag); |
| 2386 sub(src_untagged, src, kHeapObjectTag); | 2418 sub(src_untagged, src, kHeapObjectTag); |
| 2387 | 2419 |
| 2388 // Copy fields in pairs. | 2420 // Copy fields in pairs. |
| 2389 for (unsigned i = 0; i < count / 2; i++) { | 2421 for (unsigned i = 0; i < count / 2; i++) { |
| 2390 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2422 Ldp(scratch3, scratch4, |
| 2391 PostIndex)); | 2423 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); |
| 2392 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | 2424 Stp(scratch3, scratch4, |
| 2393 PostIndex)); | 2425 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); |
| 2394 } | 2426 } |
| 2395 | 2427 |
| 2396 // Handle the leftovers. | 2428 // Handle the leftovers. |
| 2397 if (count & 1) { | 2429 if (count & 1) { |
| 2398 ldr(Tmp0(), MemOperand(src_untagged)); | 2430 Ldr(scratch3, MemOperand(src_untagged)); |
| 2399 str(Tmp0(), MemOperand(dst_untagged)); | 2431 Str(scratch3, MemOperand(dst_untagged)); |
| 2400 } | 2432 } |
| 2401 } | 2433 } |
| 2402 | 2434 |
| 2403 | 2435 |
| 2404 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | 2436 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, |
| 2405 Register src, | 2437 Register src, |
| 2406 unsigned count, | 2438 unsigned count, |
| 2407 Register scratch1) { | 2439 Register scratch1, |
| 2440 Register scratch2, | |
| 2441 Register scratch3) { | |
| 2408 // Untag src and dst into scratch registers. | 2442 // Untag src and dst into scratch registers. |
| 2409 // Copy src->dst in an unrolled loop. | 2443 // Copy src->dst in an unrolled loop. |
| 2410 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1())); | 2444 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); |
| 2411 | |
| 2412 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
| 2413 InstructionAccurateScope scope(this); | |
| 2414 | 2445 |
| 2415 const Register& dst_untagged = scratch1; | 2446 const Register& dst_untagged = scratch1; |
| 2416 const Register& src_untagged = Tmp1(); | 2447 const Register& src_untagged = scratch2; |
| 2417 sub(dst_untagged, dst, kHeapObjectTag); | 2448 Sub(dst_untagged, dst, kHeapObjectTag); |
| 2418 sub(src_untagged, src, kHeapObjectTag); | 2449 Sub(src_untagged, src, kHeapObjectTag); |
| 2419 | 2450 |
| 2420 // Copy fields one by one. | 2451 // Copy fields one by one. |
| 2421 for (unsigned i = 0; i < count; i++) { | 2452 for (unsigned i = 0; i < count; i++) { |
| 2422 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); | 2453 Ldr(scratch3, MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); |
| 2423 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); | 2454 Str(scratch3, MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); |
| 2424 } | 2455 } |
| 2425 } | 2456 } |
| 2426 | 2457 |
| 2427 | 2458 |
| 2428 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | 2459 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, |
| 2429 unsigned count) { | 2460 unsigned count) { |
| 2430 // One of two methods is used: | 2461 // One of two methods is used: |
| 2431 // | 2462 // |
| 2432 // For high 'count' values where many scratch registers are available: | 2463 // For high 'count' values where many scratch registers are available: |
| 2433 // Untag src and dst into scratch registers. | 2464 // Untag src and dst into scratch registers. |
| 2434 // Copy src->dst in a tight loop. | 2465 // Copy src->dst in a tight loop. |
| 2435 // | 2466 // |
| 2436 // For low 'count' values or where few scratch registers are available: | 2467 // For low 'count' values or where few scratch registers are available: |
| 2437 // Untag src and dst into scratch registers. | 2468 // Untag src and dst into scratch registers. |
| 2438 // Copy src->dst in an unrolled loop. | 2469 // Copy src->dst in an unrolled loop. |
| 2439 // | 2470 // |
| 2440 // In both cases, fields are copied in pairs if possible, and left-overs are | 2471 // In both cases, fields are copied in pairs if possible, and left-overs are |
| 2441 // handled separately. | 2472 // handled separately. |
| 2473 ASSERT(!AreAliased(dst, src)); | |
| 2442 ASSERT(!temps.IncludesAliasOf(dst)); | 2474 ASSERT(!temps.IncludesAliasOf(dst)); |
| 2443 ASSERT(!temps.IncludesAliasOf(src)); | 2475 ASSERT(!temps.IncludesAliasOf(src)); |
| 2444 ASSERT(!temps.IncludesAliasOf(Tmp0())); | |
| 2445 ASSERT(!temps.IncludesAliasOf(Tmp1())); | |
| 2446 ASSERT(!temps.IncludesAliasOf(xzr)); | 2476 ASSERT(!temps.IncludesAliasOf(xzr)); |
| 2447 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1())); | |
| 2448 | 2477 |
| 2449 if (emit_debug_code()) { | 2478 if (emit_debug_code()) { |
| 2450 Cmp(dst, src); | 2479 Cmp(dst, src); |
| 2451 Check(ne, kTheSourceAndDestinationAreTheSame); | 2480 Check(ne, kTheSourceAndDestinationAreTheSame); |
| 2452 } | 2481 } |
| 2453 | 2482 |
| 2454 // The value of 'count' at which a loop will be generated (if there are | 2483 // The value of 'count' at which a loop will be generated (if there are |
| 2455 // enough scratch registers). | 2484 // enough scratch registers). |
| 2456 static const unsigned kLoopThreshold = 8; | 2485 static const unsigned kLoopThreshold = 8; |
| 2457 | 2486 |
| 2458 ASSERT(!temps.IsEmpty()); | 2487 // Give all temps to the MacroAssembler. |
| 2459 Register scratch1 = Register(temps.PopLowestIndex()); | 2488 UseScratchRegisterScope masm_temps(this); |
| 2460 Register scratch2 = Register(temps.PopLowestIndex()); | 2489 masm_temps.Include(temps); |
| rmcilroy (2014/02/24 11:04:49): I actually think the old approach here of popping | |
| jbramley (2014/02/24 11:59:27): I did it this way because if there are only two re | |
| 2461 Register scratch3 = Register(temps.PopLowestIndex()); | 2490 int tmp_count = TmpList()->Count(); |
| 2462 | 2491 |
| 2463 if (scratch3.IsValid() && (count >= kLoopThreshold)) { | 2492 if ((tmp_count >= 5) && (count >= kLoopThreshold)) { |
| 2464 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3); | 2493 CopyFieldsLoopPairsHelper(dst, src, count, |
| 2465 } else if (scratch2.IsValid()) { | 2494 masm_temps.AcquireX(), |
| 2466 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2); | 2495 masm_temps.AcquireX(), |
| 2467 } else if (scratch1.IsValid()) { | 2496 masm_temps.AcquireX(), |
| 2468 CopyFieldsUnrolledHelper(dst, src, count, scratch1); | 2497 masm_temps.AcquireX(), |
| 2498 masm_temps.AcquireX()); | |
| 2499 } else if (tmp_count >= 4) { | |
| 2500 CopyFieldsUnrolledPairsHelper(dst, src, count, | |
| 2501 masm_temps.AcquireX(), | |
| 2502 masm_temps.AcquireX(), | |
| 2503 masm_temps.AcquireX(), | |
| 2504 masm_temps.AcquireX()); | |
| 2505 } else if (tmp_count == 3) { | |
| 2506 CopyFieldsUnrolledHelper(dst, src, count, | |
|
jbramley
2014/02/24 11:59:27
It's also worth noting that since the merge, we al
| |
| 2507 masm_temps.AcquireX(), | |
| 2508 masm_temps.AcquireX(), | |
| 2509 masm_temps.AcquireX()); | |
| 2469 } else { | 2510 } else { |
| 2470 UNREACHABLE(); | 2511 UNREACHABLE(); |
| 2471 } | 2512 } |
| 2472 } | 2513 } |

| 2473 | 2514 |
| 2474 | 2515 |
| 2475 void MacroAssembler::CopyBytes(Register dst, | 2516 void MacroAssembler::CopyBytes(Register dst, |
| 2476 Register src, | 2517 Register src, |
| 2477 Register length, | 2518 Register length, |
| 2478 Register scratch, | 2519 Register scratch, |
| (...skipping 407 matching lines...) | |
| 2886 | 2927 |
| 2887 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | 2928 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: |
| 2888 // https://code.google.com/p/v8/issues/detail?id=3149 | 2929 // https://code.google.com/p/v8/issues/detail?id=3149 |
| 2889 Sxtw(result, result.W()); | 2930 Sxtw(result, result.W()); |
| 2890 } | 2931 } |
| 2891 | 2932 |
| 2892 | 2933 |
| 2893 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | 2934 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { |
| 2894 if (frame_mode == BUILD_STUB_FRAME) { | 2935 if (frame_mode == BUILD_STUB_FRAME) { |
| 2895 ASSERT(StackPointer().Is(jssp)); | 2936 ASSERT(StackPointer().Is(jssp)); |
| 2937 UseScratchRegisterScope temps(this); | |
| 2938 Register temp = temps.AcquireX(); | |
| 2896 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already | 2939 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already |
| 2897 // have the special STUB smi? | 2940 // have the special STUB smi? |
| 2898 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); | 2941 __ Mov(temp, Operand(Smi::FromInt(StackFrame::STUB))); |
| 2899 // Compiled stubs don't age, and so they don't need the predictable code | 2942 // Compiled stubs don't age, and so they don't need the predictable code |
| 2900 // ageing sequence. | 2943 // ageing sequence. |
| 2901 __ Push(lr, fp, cp, Tmp0()); | 2944 __ Push(lr, fp, cp, temp); |
| 2902 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 2945 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
| 2903 } else { | 2946 } else { |
| 2904 if (isolate()->IsCodePreAgingActive()) { | 2947 if (isolate()->IsCodePreAgingActive()) { |
| 2905 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 2948 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
| 2906 __ EmitCodeAgeSequence(stub); | 2949 __ EmitCodeAgeSequence(stub); |
| 2907 } else { | 2950 } else { |
| 2908 __ EmitFrameSetupForCodeAgePatching(); | 2951 __ EmitFrameSetupForCodeAgePatching(); |
| 2909 } | 2952 } |
| 2910 } | 2953 } |
| 2911 } | 2954 } |
| 2912 | 2955 |
| 2913 | 2956 |
| 2914 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 2957 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 2915 ASSERT(jssp.Is(StackPointer())); | 2958 ASSERT(jssp.Is(StackPointer())); |
| 2959 UseScratchRegisterScope temps(this); | |
| 2960 Register type_reg = temps.AcquireX(); | |
| 2961 Register code_reg = temps.AcquireX(); | |
| 2962 | |
| 2916 Push(lr, fp, cp); | 2963 Push(lr, fp, cp); |
| 2917 Mov(Tmp1(), Operand(Smi::FromInt(type))); | 2964 Mov(type_reg, Operand(Smi::FromInt(type))); |
| 2918 Mov(Tmp0(), Operand(CodeObject())); | 2965 Mov(code_reg, Operand(CodeObject())); |
| 2919 Push(Tmp1(), Tmp0()); | 2966 Push(type_reg, code_reg); |
| 2920 // jssp[4] : lr | 2967 // jssp[4] : lr |
| 2921 // jssp[3] : fp | 2968 // jssp[3] : fp |
| 2922 // jssp[2] : cp | 2969 // jssp[2] : cp |
| 2923 // jssp[1] : type | 2970 // jssp[1] : type |
| 2924 // jssp[0] : code object | 2971 // jssp[0] : code object |
| 2925 | 2972 |
| 2926 // Adjust FP to point to saved FP. | 2973 // Adjust FP to point to saved FP. |
| 2927 add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); | 2974 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); |
| 2928 } | 2975 } |
| 2929 | 2976 |
| 2930 | 2977 |
| 2931 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 2978 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 2932 ASSERT(jssp.Is(StackPointer())); | 2979 ASSERT(jssp.Is(StackPointer())); |
| 2933 // Drop the execution stack down to the frame pointer and restore | 2980 // Drop the execution stack down to the frame pointer and restore |
| 2934 // the caller frame pointer and return address. | 2981 // the caller frame pointer and return address. |
| 2935 Mov(jssp, fp); | 2982 Mov(jssp, fp); |
| 2936 AssertStackConsistency(); | 2983 AssertStackConsistency(); |
| 2937 Pop(fp, lr); | 2984 Pop(fp, lr); |
| (...skipping 245 matching lines...) | |
| 3183 // Trash the registers to simulate an allocation failure. | 3230 // Trash the registers to simulate an allocation failure. |
| 3184 // We apply salt to the original zap value to easily spot the values. | 3231 // We apply salt to the original zap value to easily spot the values. |
| 3185 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3232 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 3186 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3233 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3187 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3234 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3188 } | 3235 } |
| 3189 B(gc_required); | 3236 B(gc_required); |
| 3190 return; | 3237 return; |
| 3191 } | 3238 } |
| 3192 | 3239 |
| 3193 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1())); | 3240 UseScratchRegisterScope temps(this); |
| 3194 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() && | 3241 Register scratch3 = temps.AcquireX(); |
| 3195 Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3242 |
| 3243 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); | |
| 3244 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); | |
| 3196 | 3245 |
| 3197 // Make object size into bytes. | 3246 // Make object size into bytes. |
| 3198 if ((flags & SIZE_IN_WORDS) != 0) { | 3247 if ((flags & SIZE_IN_WORDS) != 0) { |
| 3199 object_size *= kPointerSize; | 3248 object_size *= kPointerSize; |
| 3200 } | 3249 } |
| 3201 ASSERT(0 == (object_size & kObjectAlignmentMask)); | 3250 ASSERT(0 == (object_size & kObjectAlignmentMask)); |
| 3202 | 3251 |
| 3203 // Check relative positions of allocation top and limit addresses. | 3252 // Check relative positions of allocation top and limit addresses. |
| 3204 // The values must be adjacent in memory to allow the use of LDP. | 3253 // The values must be adjacent in memory to allow the use of LDP. |
| 3205 ExternalReference heap_allocation_top = | 3254 ExternalReference heap_allocation_top = |
| 3206 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3255 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 3207 ExternalReference heap_allocation_limit = | 3256 ExternalReference heap_allocation_limit = |
| 3208 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3257 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 3209 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3258 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 3210 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3259 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 3211 ASSERT((limit - top) == kPointerSize); | 3260 ASSERT((limit - top) == kPointerSize); |
| 3212 | 3261 |
| 3213 // Set up allocation top address and object size registers. | 3262 // Set up allocation top address and object size registers. |
| 3214 Register top_address = scratch1; | 3263 Register top_address = scratch1; |
| 3215 Register allocation_limit = scratch2; | 3264 Register allocation_limit = scratch2; |
| 3216 Mov(top_address, Operand(heap_allocation_top)); | 3265 Mov(top_address, Operand(heap_allocation_top)); |
| 3217 | 3266 |
| 3218 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3267 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 3219 // Load allocation top into result and the allocation limit. | 3268 // Load allocation top into result and the allocation limit. |
| 3220 Ldp(result, allocation_limit, MemOperand(top_address)); | 3269 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 3221 } else { | 3270 } else { |
| 3222 if (emit_debug_code()) { | 3271 if (emit_debug_code()) { |
| 3223 // Assert that result actually contains top on entry. | 3272 // Assert that result actually contains top on entry. |
| 3224 Ldr(Tmp0(), MemOperand(top_address)); | 3273 Ldr(scratch3, MemOperand(top_address)); |
| 3225 Cmp(result, Tmp0()); | 3274 Cmp(result, scratch3); |
| 3226 Check(eq, kUnexpectedAllocationTop); | 3275 Check(eq, kUnexpectedAllocationTop); |
| 3227 } | 3276 } |
| 3228 // Load the allocation limit. 'result' already contains the allocation top. | 3277 // Load the allocation limit. 'result' already contains the allocation top. |
| 3229 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3278 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 3230 } | 3279 } |
| 3231 | 3280 |
| 3232 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3281 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 3233 // the same alignment on A64. | 3282 // the same alignment on A64. |
| 3234 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3283 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 3235 | 3284 |
| 3236 // Calculate new top and bail out if new space is exhausted. | 3285 // Calculate new top and bail out if new space is exhausted. |
| 3237 Adds(Tmp1(), result, object_size); | 3286 Adds(scratch3, result, object_size); |
| 3238 B(vs, gc_required); | 3287 B(vs, gc_required); |
| 3239 Cmp(Tmp1(), allocation_limit); | 3288 Cmp(scratch3, allocation_limit); |
| 3240 B(hi, gc_required); | 3289 B(hi, gc_required); |
| 3241 Str(Tmp1(), MemOperand(top_address)); | 3290 Str(scratch3, MemOperand(top_address)); |
| 3242 | 3291 |
| 3243 // Tag the object if requested. | 3292 // Tag the object if requested. |
| 3244 if ((flags & TAG_OBJECT) != 0) { | 3293 if ((flags & TAG_OBJECT) != 0) { |
| 3245 Orr(result, result, kHeapObjectTag); | 3294 Orr(result, result, kHeapObjectTag); |
| 3246 } | 3295 } |
| 3247 } | 3296 } |
| 3248 | 3297 |
| 3249 | 3298 |
| 3250 void MacroAssembler::Allocate(Register object_size, | 3299 void MacroAssembler::Allocate(Register object_size, |
| 3251 Register result, | 3300 Register result, |
| 3252 Register scratch1, | 3301 Register scratch1, |
| 3253 Register scratch2, | 3302 Register scratch2, |
| 3254 Label* gc_required, | 3303 Label* gc_required, |
| 3255 AllocationFlags flags) { | 3304 AllocationFlags flags) { |
| 3256 if (!FLAG_inline_new) { | 3305 if (!FLAG_inline_new) { |
| 3257 if (emit_debug_code()) { | 3306 if (emit_debug_code()) { |
| 3258 // Trash the registers to simulate an allocation failure. | 3307 // Trash the registers to simulate an allocation failure. |
| 3259 // We apply salt to the original zap value to easily spot the values. | 3308 // We apply salt to the original zap value to easily spot the values. |
| 3260 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3309 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
| 3261 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3310 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3262 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3311 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
| 3263 } | 3312 } |
| 3264 B(gc_required); | 3313 B(gc_required); |
| 3265 return; | 3314 return; |
| 3266 } | 3315 } |
| 3267 | 3316 |
| 3268 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1())); | 3317 UseScratchRegisterScope temps(this); |
| 3269 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && | 3318 Register scratch3 = temps.AcquireX(); |
| 3270 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3319 |
| 3320 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); | |
| 3321 ASSERT(object_size.Is64Bits() && result.Is64Bits() && | |
| 3322 scratch1.Is64Bits() && scratch2.Is64Bits()); | |
| 3271 | 3323 |
| 3272 // Check relative positions of allocation top and limit addresses. | 3324 // Check relative positions of allocation top and limit addresses. |
| 3273 // The values must be adjacent in memory to allow the use of LDP. | 3325 // The values must be adjacent in memory to allow the use of LDP. |
| 3274 ExternalReference heap_allocation_top = | 3326 ExternalReference heap_allocation_top = |
| 3275 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3327 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 3276 ExternalReference heap_allocation_limit = | 3328 ExternalReference heap_allocation_limit = |
| 3277 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3329 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 3278 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3330 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
| 3279 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3331 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
| 3280 ASSERT((limit - top) == kPointerSize); | 3332 ASSERT((limit - top) == kPointerSize); |
| 3281 | 3333 |
| 3282 // Set up allocation top address and object size registers. | 3334 // Set up allocation top address and object size registers. |
| 3283 Register top_address = scratch1; | 3335 Register top_address = scratch1; |
| 3284 Register allocation_limit = scratch2; | 3336 Register allocation_limit = scratch2; |
| 3285 Mov(top_address, Operand(heap_allocation_top)); | 3337 Mov(top_address, Operand(heap_allocation_top)); |
| 3286 | 3338 |
| 3287 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3339 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 3288 // Load allocation top into result and the allocation limit. | 3340 // Load allocation top into result and the allocation limit. |
| 3289 Ldp(result, allocation_limit, MemOperand(top_address)); | 3341 Ldp(result, allocation_limit, MemOperand(top_address)); |
| 3290 } else { | 3342 } else { |
| 3291 if (emit_debug_code()) { | 3343 if (emit_debug_code()) { |
| 3292 // Assert that result actually contains top on entry. | 3344 // Assert that result actually contains top on entry. |
| 3293 Ldr(Tmp0(), MemOperand(top_address)); | 3345 Ldr(scratch3, MemOperand(top_address)); |
| 3294 Cmp(result, Tmp0()); | 3346 Cmp(result, scratch3); |
| 3295 Check(eq, kUnexpectedAllocationTop); | 3347 Check(eq, kUnexpectedAllocationTop); |
| 3296 } | 3348 } |
| 3297 // Load the allocation limit. 'result' already contains the allocation top. | 3349 // Load the allocation limit. 'result' already contains the allocation top. |
| 3298 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3350 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
| 3299 } | 3351 } |
| 3300 | 3352 |
| 3301 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3353 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
| 3302 // the same alignment on A64. | 3354 // the same alignment on A64. |
| 3303 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3355 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 3304 | 3356 |
| 3305 // Calculate new top and bail out if new space is exhausted | 3357 // Calculate new top and bail out if new space is exhausted |
| 3306 if ((flags & SIZE_IN_WORDS) != 0) { | 3358 if ((flags & SIZE_IN_WORDS) != 0) { |
| 3307 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2)); | 3359 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); |
| 3308 } else { | 3360 } else { |
| 3309 Adds(Tmp1(), result, object_size); | 3361 Adds(scratch3, result, object_size); |
| 3310 } | 3362 } |
| 3311 | 3363 |
| 3312 if (emit_debug_code()) { | 3364 if (emit_debug_code()) { |
| 3313 Tst(Tmp1(), kObjectAlignmentMask); | 3365 Tst(scratch3, kObjectAlignmentMask); |
| 3314 Check(eq, kUnalignedAllocationInNewSpace); | 3366 Check(eq, kUnalignedAllocationInNewSpace); |
| 3315 } | 3367 } |
| 3316 | 3368 |
| 3317 B(vs, gc_required); | 3369 B(vs, gc_required); |
| 3318 Cmp(Tmp1(), allocation_limit); | 3370 Cmp(scratch3, allocation_limit); |
| 3319 B(hi, gc_required); | 3371 B(hi, gc_required); |
| 3320 Str(Tmp1(), MemOperand(top_address)); | 3372 Str(scratch3, MemOperand(top_address)); |
| 3321 | 3373 |
| 3322 // Tag the object if requested. | 3374 // Tag the object if requested. |
| 3323 if ((flags & TAG_OBJECT) != 0) { | 3375 if ((flags & TAG_OBJECT) != 0) { |
| 3324 Orr(result, result, kHeapObjectTag); | 3376 Orr(result, result, kHeapObjectTag); |
| 3325 } | 3377 } |
| 3326 } | 3378 } |
| 3327 | 3379 |
| 3328 | 3380 |
| 3329 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 3381 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
| 3330 Register scratch) { | 3382 Register scratch) { |
| (...skipping 307 matching lines...) | |
| 3638 } | 3690 } |
| 3639 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 3691 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 3640 Cmp(scratch, Operand(map)); | 3692 Cmp(scratch, Operand(map)); |
| 3641 B(ne, &fail); | 3693 B(ne, &fail); |
| 3642 Jump(success, RelocInfo::CODE_TARGET); | 3694 Jump(success, RelocInfo::CODE_TARGET); |
| 3643 Bind(&fail); | 3695 Bind(&fail); |
| 3644 } | 3696 } |
| 3645 | 3697 |
| 3646 | 3698 |
| 3647 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { | 3699 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { |
| 3648 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 3700 UseScratchRegisterScope temps(this); |
| 3649 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset)); | 3701 Register temp = temps.AcquireX(); |
| 3650 Tst(Tmp0(), mask); | 3702 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3703 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | |
| 3704 Tst(temp, mask); | |
| 3651 } | 3705 } |
| 3652 | 3706 |
| 3653 | 3707 |
| 3654 void MacroAssembler::LoadElementsKind(Register result, Register object) { | 3708 void MacroAssembler::LoadElementsKind(Register result, Register object) { |
| 3655 // Load map. | 3709 // Load map. |
| 3656 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 3710 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3657 // Load the map's "bit field 2". | 3711 // Load the map's "bit field 2". |
| 3658 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); | 3712 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); |
| 3659 // Retrieve elements_kind from bit field 2. | 3713 // Retrieve elements_kind from bit field 2. |
| 3660 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | 3714 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); |
| (...skipping 51 matching lines...) | |
| 3712 Bind(&non_instance); | 3766 Bind(&non_instance); |
| 3713 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 3767 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 3714 | 3768 |
| 3715 // All done. | 3769 // All done. |
| 3716 Bind(&done); | 3770 Bind(&done); |
| 3717 } | 3771 } |
| 3718 | 3772 |
| 3719 | 3773 |
| 3720 void MacroAssembler::CompareRoot(const Register& obj, | 3774 void MacroAssembler::CompareRoot(const Register& obj, |
| 3721 Heap::RootListIndex index) { | 3775 Heap::RootListIndex index) { |
| 3722 ASSERT(!AreAliased(obj, Tmp0())); | 3776 UseScratchRegisterScope temps(this); |
| 3723 LoadRoot(Tmp0(), index); | 3777 Register temp = temps.AcquireX(); |
| 3724 Cmp(obj, Tmp0()); | 3778 ASSERT(!AreAliased(obj, temp)); |
| 3779 LoadRoot(temp, index); | |
| 3780 Cmp(obj, temp); | |
| 3725 } | 3781 } |
| 3726 | 3782 |
| 3727 | 3783 |
| 3728 void MacroAssembler::JumpIfRoot(const Register& obj, | 3784 void MacroAssembler::JumpIfRoot(const Register& obj, |
| 3729 Heap::RootListIndex index, | 3785 Heap::RootListIndex index, |
| 3730 Label* if_equal) { | 3786 Label* if_equal) { |
| 3731 CompareRoot(obj, index); | 3787 CompareRoot(obj, index); |
| 3732 B(eq, if_equal); | 3788 B(eq, if_equal); |
| 3733 } | 3789 } |
| 3734 | 3790 |
| (...skipping 176 matching lines...) | |
| 3911 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); | 3967 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); |
| 3912 Check(lt, kIndexIsTooLarge); | 3968 Check(lt, kIndexIsTooLarge); |
| 3913 | 3969 |
| 3914 ASSERT_EQ(0, Smi::FromInt(0)); | 3970 ASSERT_EQ(0, Smi::FromInt(0)); |
| 3915 Cmp(index, 0); | 3971 Cmp(index, 0); |
| 3916 Check(ge, kIndexIsNegative); | 3972 Check(ge, kIndexIsNegative); |
| 3917 } | 3973 } |
| 3918 | 3974 |
| 3919 | 3975 |
| 3920 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 3976 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 3921 Register scratch, | 3977 Register scratch1, |
| 3978 Register scratch2, | |
| 3922 Label* miss) { | 3979 Label* miss) { |
| 3923 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function. | 3980 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
| 3924 // The ARM version takes two scratch registers, and that should be enough for | |
| 3925 // all of the checks. | |
| 3926 | |
| 3927 Label same_contexts; | 3981 Label same_contexts; |
| 3928 | 3982 |
| 3929 ASSERT(!AreAliased(holder_reg, scratch)); | |
| 3930 | |
| 3931 // Load current lexical context from the stack frame. | 3983 // Load current lexical context from the stack frame. |
| 3932 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3984 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 3933 // In debug mode, make sure the lexical context is set. | 3985 // In debug mode, make sure the lexical context is set. |
| 3934 #ifdef DEBUG | 3986 #ifdef DEBUG |
| 3935 Cmp(scratch, 0); | 3987 Cmp(scratch1, 0); |
| 3936 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 3988 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
| 3937 #endif | 3989 #endif |
| 3938 | 3990 |
| 3939 // Load the native context of the current context. | 3991 // Load the native context of the current context. |
| 3940 int offset = | 3992 int offset = |
| 3941 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | 3993 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
| 3942 Ldr(scratch, FieldMemOperand(scratch, offset)); | 3994 Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
| 3943 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 3995 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
| 3944 | 3996 |
| 3945 // Check the context is a native context. | 3997 // Check the context is a native context. |
| 3946 if (emit_debug_code()) { | 3998 if (emit_debug_code()) { |
| 3947 // Read the first word and compare to the global_context_map. | 3999 // Read the first word and compare to the global_context_map. |
| 3948 Register temp = Tmp1(); | 4000 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset)); |
| 3949 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 4001 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex); |
| 3950 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | |
| 3951 Check(eq, kExpectedNativeContext); | 4002 Check(eq, kExpectedNativeContext); |
| 3952 } | 4003 } |
| 3953 | 4004 |
| 3954 // Check if both contexts are the same. | 4005 // Check if both contexts are the same. |
| 3955 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 4006 Ldr(scratch2, FieldMemOperand(holder_reg, |
| 3956 cmp(scratch, Tmp0()); | 4007 JSGlobalProxy::kNativeContextOffset)); |
| 3957 b(&same_contexts, eq); | 4008 Cmp(scratch1, scratch2); |
| 4009 B(&same_contexts, eq); | |
| 3958 | 4010 |
| 3959 // Check the context is a native context. | 4011 // Check the context is a native context. |
| 3960 if (emit_debug_code()) { | 4012 if (emit_debug_code()) { |
| 3961 // Move Tmp0() into a different register, as CompareRoot will use it. | 4013 // We're short on scratch registers here, so use holder_reg as a scratch. |
| 3962 Register temp = Tmp1(); | 4014 Push(holder_reg); |
| 3963 mov(temp, Tmp0()); | 4015 Register scratch3 = holder_reg; |
| 3964 CompareRoot(temp, Heap::kNullValueRootIndex); | 4016 |
| 4017 CompareRoot(scratch2, Heap::kNullValueRootIndex); | |
| 3965 Check(ne, kExpectedNonNullContext); | 4018 Check(ne, kExpectedNonNullContext); |
| 3966 | 4019 |
| 3967 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset)); | 4020 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); |
| 3968 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | 4021 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex); |
| 3969 Check(eq, kExpectedNativeContext); | 4022 Check(eq, kExpectedNativeContext); |
| 3970 | 4023 Pop(holder_reg); |
| 3971 // Let's consider that Tmp0() has been cloberred by the MacroAssembler. | |
| 3972 // We reload it with its value. | |
| 3973 ldr(Tmp0(), FieldMemOperand(holder_reg, | |
| 3974 JSGlobalProxy::kNativeContextOffset)); | |
| 3975 } | 4024 } |
| 3976 | 4025 |
| 3977 // Check that the security token in the calling global object is | 4026 // Check that the security token in the calling global object is |
| 3978 // compatible with the security token in the receiving global | 4027 // compatible with the security token in the receiving global |
| 3979 // object. | 4028 // object. |
| 3980 int token_offset = Context::kHeaderSize + | 4029 int token_offset = Context::kHeaderSize + |
| 3981 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 4030 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 3982 | 4031 |
| 3983 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 4032 Ldr(scratch1, FieldMemOperand(scratch1, token_offset)); |
| 3984 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset)); | 4033 Ldr(scratch2, FieldMemOperand(scratch2, token_offset)); |
| 3985 cmp(scratch, Tmp0()); | 4034 Cmp(scratch1, scratch2); |
| 3986 b(miss, ne); | 4035 B(miss, ne); |
| 3987 | 4036 |
| 3988 bind(&same_contexts); | 4037 Bind(&same_contexts); |
| 3989 } | 4038 } |
| 3990 | 4039 |
| 3991 | 4040 |
| 3992 // Compute the hash code from the untagged key. This must be kept in sync with | 4041 // Compute the hash code from the untagged key. This must be kept in sync with |
| 3993 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | 4042 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in |
| 3994 // code-stub-hydrogen.cc | 4043 // code-stub-hydrogen.cc |
| 3995 void MacroAssembler::GetNumberHash(Register key, Register scratch) { | 4044 void MacroAssembler::GetNumberHash(Register key, Register scratch) { |
| 3996 ASSERT(!AreAliased(key, scratch)); | 4045 ASSERT(!AreAliased(key, scratch)); |
| 3997 | 4046 |
| 3998 // Xor original key with a seed. | 4047 // Xor original key with a seed. |
| (...skipping 82 matching lines...) | |
| 4081 | 4130 |
| 4082 // Get the value at the masked, scaled index and return. | 4131 // Get the value at the masked, scaled index and return. |
| 4083 const int kValueOffset = | 4132 const int kValueOffset = |
| 4084 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 4133 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
| 4085 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); | 4134 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); |
| 4086 } | 4135 } |
| 4087 | 4136 |
| 4088 | 4137 |
| 4089 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 4138 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
| 4090 Register address, | 4139 Register address, |
| 4091 Register scratch, | 4140 Register scratch1, |
| 4141 Register scratch2, | |
| 4092 SaveFPRegsMode fp_mode, | 4142 SaveFPRegsMode fp_mode, |
| 4093 RememberedSetFinalAction and_then) { | 4143 RememberedSetFinalAction and_then) { |
| 4094 ASSERT(!AreAliased(object, address, scratch)); | 4144 ASSERT(!AreAliased(object, address, scratch1)); |
| 4095 Label done, store_buffer_overflow; | 4145 Label done, store_buffer_overflow; |
| 4096 if (emit_debug_code()) { | 4146 if (emit_debug_code()) { |
| 4147 UseScratchRegisterScope temps(this); | |
| 4148 temps.Include(scratch1, scratch2); | |
|
rmcilroy
2014/02/24 11:04:49
nit - is this Include really required here?
jbramley
2014/02/24 11:59:27
Yes, I'm afraid so. JumpIfNotInNewSpace needs some
| |
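For readers following this thread, a short sketch (names taken from the diff; not compilable on its own) of the pattern being questioned: the caller lends its scratch registers to the pool so that nested macro helpers can acquire temps of their own inside the debug-only block.

```cpp
// Sketch of the Include pattern discussed above (V8-style, illustrative only).
if (emit_debug_code()) {
  UseScratchRegisterScope temps(this);
  temps.Include(scratch1, scratch2);   // the pool may otherwise be empty here
  Label ok;
  JumpIfNotInNewSpace(object, &ok);    // internally acquires from the pool
  Abort(kRememberedSetPointerInNewSpace);
  bind(&ok);
}  // scratch1 and scratch2 drop back out of the pool at end of scope
```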
| 4149 | |
| 4097 Label ok; | 4150 Label ok; |
| 4098 JumpIfNotInNewSpace(object, &ok); | 4151 JumpIfNotInNewSpace(object, &ok); |
| 4099 Abort(kRememberedSetPointerInNewSpace); | 4152 Abort(kRememberedSetPointerInNewSpace); |
| 4100 bind(&ok); | 4153 bind(&ok); |
| 4101 } | 4154 } |
| 4102 // Load store buffer top. | 4155 // Load store buffer top. |
| 4103 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate()))); | 4156 Mov(scratch2, Operand(ExternalReference::store_buffer_top(isolate()))); |
| 4104 Ldr(scratch, MemOperand(Tmp0())); | 4157 Ldr(scratch1, MemOperand(scratch2)); |
| 4105 // Store pointer to buffer and increment buffer top. | 4158 // Store pointer to buffer and increment buffer top. |
| 4106 Str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 4159 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); |
| 4107 // Write back new top of buffer. | 4160 // Write back new top of buffer. |
| 4108 Str(scratch, MemOperand(Tmp0())); | 4161 Str(scratch1, MemOperand(scratch2)); |
| 4109 // Call stub on end of buffer. | 4162 // Call stub on end of buffer. |
| 4110 // Check for end of buffer. | 4163 // Check for end of buffer. |
| 4111 ASSERT(StoreBuffer::kStoreBufferOverflowBit == | 4164 ASSERT(StoreBuffer::kStoreBufferOverflowBit == |
| 4112 (1 << (14 + kPointerSizeLog2))); | 4165 (1 << (14 + kPointerSizeLog2))); |
| 4113 if (and_then == kFallThroughAtEnd) { | 4166 if (and_then == kFallThroughAtEnd) { |
| 4114 Tbz(scratch, (14 + kPointerSizeLog2), &done); | 4167 Tbz(scratch1, (14 + kPointerSizeLog2), &done); |
| 4115 } else { | 4168 } else { |
| 4116 ASSERT(and_then == kReturnAtEnd); | 4169 ASSERT(and_then == kReturnAtEnd); |
| 4117 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow); | 4170 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); |
| 4118 Ret(); | 4171 Ret(); |
| 4119 } | 4172 } |
| 4120 | 4173 |
| 4121 Bind(&store_buffer_overflow); | 4174 Bind(&store_buffer_overflow); |
| 4122 Push(lr); | 4175 Push(lr); |
| 4123 StoreBufferOverflowStub store_buffer_overflow_stub = | 4176 StoreBufferOverflowStub store_buffer_overflow_stub = |
| 4124 StoreBufferOverflowStub(fp_mode); | 4177 StoreBufferOverflowStub(fp_mode); |
| 4125 CallStub(&store_buffer_overflow_stub); | 4178 CallStub(&store_buffer_overflow_stub); |
| 4126 Pop(lr); | 4179 Pop(lr); |
| 4127 | 4180 |
| (...skipping 127 matching lines...) | |
| 4255 | 4308 |
| 4256 // Clobber clobbered input registers when running with the debug-code flag | 4309 // Clobber clobbered input registers when running with the debug-code flag |
| 4257 // turned on to provoke errors. | 4310 // turned on to provoke errors. |
| 4258 if (emit_debug_code()) { | 4311 if (emit_debug_code()) { |
| 4259 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); | 4312 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); |
| 4260 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); | 4313 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); |
| 4261 } | 4314 } |
| 4262 } | 4315 } |
| 4263 | 4316 |
| 4264 | 4317 |
| 4265 // Will clobber: object, address, value, Tmp0(), Tmp1(). | 4318 // Will clobber: object, address, value. |
| 4266 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. | 4319 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. |
| 4267 // | 4320 // |
| 4268 // The register 'object' contains a heap object pointer. The heap object tag is | 4321 // The register 'object' contains a heap object pointer. The heap object tag is |
| 4269 // shifted away. | 4322 // shifted away. |
| 4270 void MacroAssembler::RecordWrite(Register object, | 4323 void MacroAssembler::RecordWrite(Register object, |
| 4271 Register address, | 4324 Register address, |
| 4272 Register value, | 4325 Register value, |
| 4273 LinkRegisterStatus lr_status, | 4326 LinkRegisterStatus lr_status, |
| 4274 SaveFPRegsMode fp_mode, | 4327 SaveFPRegsMode fp_mode, |
| 4275 RememberedSetAction remembered_set_action, | 4328 RememberedSetAction remembered_set_action, |
| 4276 SmiCheck smi_check) { | 4329 SmiCheck smi_check) { |
| 4277 ASM_LOCATION("MacroAssembler::RecordWrite"); | 4330 ASM_LOCATION("MacroAssembler::RecordWrite"); |
| 4278 ASSERT(!AreAliased(object, value)); | 4331 ASSERT(!AreAliased(object, value)); |
| 4279 | 4332 |
| 4280 if (emit_debug_code()) { | 4333 if (emit_debug_code()) { |
| 4281 Ldr(Tmp0(), MemOperand(address)); | 4334 UseScratchRegisterScope temps(this); |
| 4282 Cmp(Tmp0(), value); | 4335 Register temp = temps.AcquireX(); |
| 4336 | |
| 4337 Ldr(temp, MemOperand(address)); | |
| 4338 Cmp(temp, value); | |
| 4283 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 4339 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
| 4284 } | 4340 } |
| 4285 | 4341 |
| 4286 // Count number of write barriers in generated code. | 4342 // Count number of write barriers in generated code. |
| 4287 isolate()->counters()->write_barriers_static()->Increment(); | 4343 isolate()->counters()->write_barriers_static()->Increment(); |
| 4288 // TODO(mstarzinger): Dynamic counter missing. | 4344 // TODO(mstarzinger): Dynamic counter missing. |
| 4289 | 4345 |
| 4290 // First, check if a write barrier is even needed. The tests below | 4346 // First, check if a write barrier is even needed. The tests below |
| 4291 // catch stores of smis and stores into the young generation. | 4347 // catch stores of smis and stores into the young generation. |
| 4292 Label done; | 4348 Label done; |
| (...skipping 44 matching lines...) | |
| 4337 Tbz(reg, 1, &color_is_valid); | 4393 Tbz(reg, 1, &color_is_valid); |
| 4338 Abort(kUnexpectedColorFound); | 4394 Abort(kUnexpectedColorFound); |
| 4339 Bind(&color_is_valid); | 4395 Bind(&color_is_valid); |
| 4340 } | 4396 } |
| 4341 } | 4397 } |
| 4342 | 4398 |
| 4343 | 4399 |
| 4344 void MacroAssembler::GetMarkBits(Register addr_reg, | 4400 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 4345 Register bitmap_reg, | 4401 Register bitmap_reg, |
| 4346 Register shift_reg) { | 4402 Register shift_reg) { |
| 4347 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg)); | 4403 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); |
| 4404 ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); | |
| 4348 // addr_reg is divided into fields: | 4405 // addr_reg is divided into fields: |
| 4349 // |63 page base 20|19 high 8|7 shift 3|2 0| | 4406 // |63 page base 20|19 high 8|7 shift 3|2 0| |
| 4350 // 'high' gives the index of the cell holding color bits for the object. | 4407 // 'high' gives the index of the cell holding color bits for the object. |
| 4351 // 'shift' gives the offset in the cell for this object's color. | 4408 // 'shift' gives the offset in the cell for this object's color. |
| 4352 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 4409 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
| 4353 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | 4410 UseScratchRegisterScope temps(this); |
| 4411 Register temp = temps.AcquireX(); | |
| 4412 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | |
| 4354 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); | 4413 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); |
| 4355 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2)); | 4414 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); |
| 4356 // bitmap_reg: | 4415 // bitmap_reg: |
| 4357 // |63 page base 20|19 zeros 15|14 high 3|2 0| | 4416 // |63 page base 20|19 zeros 15|14 high 3|2 0| |
| 4358 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 4417 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
| 4359 } | 4418 } |
| 4360 | 4419 |
| 4361 | 4420 |
| 4362 void MacroAssembler::HasColor(Register object, | 4421 void MacroAssembler::HasColor(Register object, |
| 4363 Register bitmap_scratch, | 4422 Register bitmap_scratch, |
| 4364 Register shift_scratch, | 4423 Register shift_scratch, |
| 4365 Label* has_color, | 4424 Label* has_color, |
| (...skipping 203 matching lines...) | |
| 4569 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { | 4628 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { |
| 4570 if (emit_debug_code()) { | 4629 if (emit_debug_code()) { |
| 4571 CheckRegisterIsClear(reg, reason); | 4630 CheckRegisterIsClear(reg, reason); |
| 4572 } | 4631 } |
| 4573 } | 4632 } |
| 4574 | 4633 |
| 4575 | 4634 |
| 4576 void MacroAssembler::AssertRegisterIsRoot(Register reg, | 4635 void MacroAssembler::AssertRegisterIsRoot(Register reg, |
| 4577 Heap::RootListIndex index, | 4636 Heap::RootListIndex index, |
| 4578 BailoutReason reason) { | 4637 BailoutReason reason) { |
| 4579 // CompareRoot uses Tmp0(). | |
| 4580 ASSERT(!reg.Is(Tmp0())); | |
| 4581 if (emit_debug_code()) { | 4638 if (emit_debug_code()) { |
| 4582 CompareRoot(reg, index); | 4639 CompareRoot(reg, index); |
| 4583 Check(eq, reason); | 4640 Check(eq, reason); |
| 4584 } | 4641 } |
| 4585 } | 4642 } |
| 4586 | 4643 |
| 4587 | 4644 |
| 4588 void MacroAssembler::AssertFastElements(Register elements) { | 4645 void MacroAssembler::AssertFastElements(Register elements) { |
| 4589 if (emit_debug_code()) { | 4646 if (emit_debug_code()) { |
| 4590 Register temp = Tmp1(); | 4647 UseScratchRegisterScope temps(this); |
| 4648 Register temp = temps.AcquireX(); | |
| 4591 Label ok; | 4649 Label ok; |
| 4592 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); | 4650 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 4593 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); | 4651 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); |
| 4594 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); | 4652 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); |
| 4595 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); | 4653 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); |
| 4596 Abort(kJSObjectWithFastElementsMapHasSlowElements); | 4654 Abort(kJSObjectWithFastElementsMapHasSlowElements); |
| 4597 Bind(&ok); | 4655 Bind(&ok); |
| 4598 } | 4656 } |
| 4599 } | 4657 } |
| 4600 | 4658 |
| 4601 | 4659 |
| 4602 void MacroAssembler::AssertIsString(const Register& object) { | 4660 void MacroAssembler::AssertIsString(const Register& object) { |
| 4603 if (emit_debug_code()) { | 4661 if (emit_debug_code()) { |
| 4604 Register temp = Tmp1(); | 4662 UseScratchRegisterScope temps(this); |
| 4663 Register temp = temps.AcquireX(); | |
| 4605 STATIC_ASSERT(kSmiTag == 0); | 4664 STATIC_ASSERT(kSmiTag == 0); |
| 4606 Tst(object, Operand(kSmiTagMask)); | 4665 Tst(object, Operand(kSmiTagMask)); |
| 4607 Check(ne, kOperandIsNotAString); | 4666 Check(ne, kOperandIsNotAString); |
| 4608 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 4667 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 4609 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 4668 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
| 4610 Check(lo, kOperandIsNotAString); | 4669 Check(lo, kOperandIsNotAString); |
| 4611 } | 4670 } |
| 4612 } | 4671 } |
| 4613 | 4672 |
| 4614 | 4673 |
| (...skipping 26 matching lines...) | |
| 4641 } | 4700 } |
| 4642 #endif | 4701 #endif |
| 4643 | 4702 |
| 4644 // Abort is used in some contexts where csp is the stack pointer. In order to | 4703 // Abort is used in some contexts where csp is the stack pointer. In order to |
| 4645 // simplify the CallRuntime code, make sure that jssp is the stack pointer. | 4704 // simplify the CallRuntime code, make sure that jssp is the stack pointer. |
| 4646 // There is no risk of register corruption here because Abort doesn't return. | 4705 // There is no risk of register corruption here because Abort doesn't return. |
| 4647 Register old_stack_pointer = StackPointer(); | 4706 Register old_stack_pointer = StackPointer(); |
| 4648 SetStackPointer(jssp); | 4707 SetStackPointer(jssp); |
| 4649 Mov(jssp, old_stack_pointer); | 4708 Mov(jssp, old_stack_pointer); |
| 4650 | 4709 |
| 4710 // We need some scratch registers for the MacroAssembler, so make sure we have | |
| 4711 // some. This is safe here because Abort never returns. | |
| 4712 RegList old_tmp_list = TmpList()->list(); | |
| 4713 TmpList()->Combine(ip0); | |
| 4714 TmpList()->Combine(ip1); | |
| 4715 | |
| 4651 if (use_real_aborts()) { | 4716 if (use_real_aborts()) { |
| 4652 // Avoid infinite recursion; Push contains some assertions that use Abort. | 4717 // Avoid infinite recursion; Push contains some assertions that use Abort. |
| 4653 NoUseRealAbortsScope no_real_aborts(this); | 4718 NoUseRealAbortsScope no_real_aborts(this); |
| 4654 | 4719 |
| 4655 Mov(x0, Operand(Smi::FromInt(reason))); | 4720 Mov(x0, Operand(Smi::FromInt(reason))); |
| 4656 Push(x0); | 4721 Push(x0); |
| 4657 | 4722 |
| 4658 if (!has_frame_) { | 4723 if (!has_frame_) { |
| 4659 // We don't actually want to generate a pile of code for this, so just | 4724 // We don't actually want to generate a pile of code for this, so just |
| 4660 // claim there is a stack frame, without generating one. | 4725 // claim there is a stack frame, without generating one. |
| (...skipping 16 matching lines...) | |
| 4677 | 4742 |
| 4678 // Emit the message string directly in the instruction stream. | 4743 // Emit the message string directly in the instruction stream. |
| 4679 { | 4744 { |
| 4680 BlockConstPoolScope scope(this); | 4745 BlockConstPoolScope scope(this); |
| 4681 Bind(&msg_address); | 4746 Bind(&msg_address); |
| 4682 EmitStringData(GetBailoutReason(reason)); | 4747 EmitStringData(GetBailoutReason(reason)); |
| 4683 } | 4748 } |
| 4684 } | 4749 } |
| 4685 | 4750 |
| 4686 SetStackPointer(old_stack_pointer); | 4751 SetStackPointer(old_stack_pointer); |
| 4752 TmpList()->set_list(old_tmp_list); | |
| 4687 } | 4753 } |
| 4688 | 4754 |
| 4689 | 4755 |
| 4690 void MacroAssembler::LoadTransitionedArrayMapConditional( | 4756 void MacroAssembler::LoadTransitionedArrayMapConditional( |
| 4691 ElementsKind expected_kind, | 4757 ElementsKind expected_kind, |
| 4692 ElementsKind transitioned_kind, | 4758 ElementsKind transitioned_kind, |
| 4693 Register map_in_out, | 4759 Register map_in_out, |
| 4694 Register scratch, | 4760 Register scratch1, |
| 4761 Register scratch2, | |
| 4695 Label* no_map_match) { | 4762 Label* no_map_match) { |
| 4696 // Load the global or builtins object from the current context. | 4763 // Load the global or builtins object from the current context. |
| 4697 Ldr(scratch, GlobalObjectMemOperand()); | 4764 Ldr(scratch1, GlobalObjectMemOperand()); |
| 4698 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 4765 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
| 4699 | 4766 |
| 4700 // Check that the function's map is the same as the expected cached map. | 4767 // Check that the function's map is the same as the expected cached map. |
| 4701 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX)); | 4768 Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX)); |
| 4702 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4769 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4703 Ldr(Tmp0(), FieldMemOperand(scratch, offset)); | 4770 Ldr(scratch2, FieldMemOperand(scratch1, offset)); |
| 4704 Cmp(map_in_out, Tmp0()); | 4771 Cmp(map_in_out, scratch2); |
| 4705 B(ne, no_map_match); | 4772 B(ne, no_map_match); |
| 4706 | 4773 |
| 4707 // Use the transitioned cached map. | 4774 // Use the transitioned cached map. |
| 4708 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4775 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
| 4709 Ldr(map_in_out, FieldMemOperand(scratch, offset)); | 4776 Ldr(map_in_out, FieldMemOperand(scratch1, offset)); |
| 4710 } | 4777 } |
| 4711 | 4778 |
| 4712 | 4779 |
| 4713 void MacroAssembler::LoadInitialArrayMap(Register function_in, | 4780 void MacroAssembler::LoadInitialArrayMap(Register function_in, |
| 4714 Register scratch, | 4781 Register scratch1, |
| 4782 Register scratch2, | |
| 4715 Register map_out, | 4783 Register map_out, |
| 4716 ArrayHasHoles holes) { | 4784 ArrayHasHoles holes) { |
| 4717 ASSERT(!AreAliased(function_in, scratch, map_out)); | 4785 ASSERT(!AreAliased(function_in, scratch1, scratch2, map_out)); |
| 4718 Label done; | 4786 Label done; |
| 4719 Ldr(map_out, FieldMemOperand(function_in, | 4787 Ldr(map_out, FieldMemOperand(function_in, |
| 4720 JSFunction::kPrototypeOrInitialMapOffset)); | 4788 JSFunction::kPrototypeOrInitialMapOffset)); |
| 4721 | 4789 |
| 4722 if (!FLAG_smi_only_arrays) { | 4790 if (!FLAG_smi_only_arrays) { |
| 4723 ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS | 4791 ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS |
| 4724 : FAST_ELEMENTS; | 4792 : FAST_ELEMENTS; |
| 4725 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out, | 4793 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out, |
| 4726 scratch, &done); | 4794 scratch1, scratch2, &done); |
| 4727 } else if (holes == kArrayCanHaveHoles) { | 4795 } else if (holes == kArrayCanHaveHoles) { |
| 4728 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, | 4796 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
| 4729 FAST_HOLEY_SMI_ELEMENTS, map_out, | 4797 FAST_HOLEY_SMI_ELEMENTS, map_out, |
| 4730 scratch, &done); | 4798 scratch1, scratch2, &done); |
| 4731 } | 4799 } |
| 4732 Bind(&done); | 4800 Bind(&done); |
| 4733 } | 4801 } |
| 4734 | 4802 |
| 4735 | 4803 |
| 4736 void MacroAssembler::LoadArrayFunction(Register function) { | 4804 void MacroAssembler::LoadArrayFunction(Register function) { |
| 4737 // Load the global or builtins object from the current context. | 4805 // Load the global or builtins object from the current context. |
| 4738 Ldr(function, GlobalObjectMemOperand()); | 4806 Ldr(function, GlobalObjectMemOperand()); |
| 4739 // Load the global context from the global or builtins object. | 4807 // Load the global context from the global or builtins object. |
| 4740 Ldr(function, | 4808 Ldr(function, |
| (...skipping 34 matching lines...) | |
| 4775 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. | 4843 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. |
| 4776 void MacroAssembler::PrintfNoPreserve(const char * format, | 4844 void MacroAssembler::PrintfNoPreserve(const char * format, |
| 4777 const CPURegister& arg0, | 4845 const CPURegister& arg0, |
| 4778 const CPURegister& arg1, | 4846 const CPURegister& arg1, |
| 4779 const CPURegister& arg2, | 4847 const CPURegister& arg2, |
| 4780 const CPURegister& arg3) { | 4848 const CPURegister& arg3) { |
| 4781 // We cannot handle a caller-saved stack pointer. It doesn't make much sense | 4849 // We cannot handle a caller-saved stack pointer. It doesn't make much sense |
| 4782 // in most cases anyway, so this restriction shouldn't be too serious. | 4850 // in most cases anyway, so this restriction shouldn't be too serious. |
| 4783 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); | 4851 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); |
| 4784 | 4852 |
| 4785 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro | 4853 // Make sure that the macro assembler doesn't try to use any of our arguments |
| 4786 // assembler. We cannot print the stack pointer because it is typically used | 4854 // as scratch registers. |
| 4787 // to preserve caller-saved registers (using other Printf variants which | 4855 UseScratchRegisterScope temps(this); |
| 4788 // depend on this helper). | 4856 temps.Exclude(arg0, arg1, arg2, arg3); |
| 4789 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0)); | 4857 |
| 4790 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1)); | 4858 // We cannot print the stack pointer because it is typically used to preserve |
| 4791 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2)); | 4859 // caller-saved registers (using other Printf variants which depend on this |
| 4792 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3)); | 4860 // helper). |
| 4861 ASSERT(!AreAliased(arg0, StackPointer())); | |
| 4862 ASSERT(!AreAliased(arg1, StackPointer())); | |
| 4863 ASSERT(!AreAliased(arg2, StackPointer())); | |
| 4864 ASSERT(!AreAliased(arg3, StackPointer())); | |
| 4793 | 4865 |
| 4794 static const int kMaxArgCount = 4; | 4866 static const int kMaxArgCount = 4; |
| 4795 // Assume that we have the maximum number of arguments until we know | 4867 // Assume that we have the maximum number of arguments until we know |
| 4796 // otherwise. | 4868 // otherwise. |
| 4797 int arg_count = kMaxArgCount; | 4869 int arg_count = kMaxArgCount; |
| 4798 | 4870 |
| 4799 // The provided arguments. | 4871 // The provided arguments. |
| 4800 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; | 4872 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; |
| 4801 | 4873 |
| 4802 // The PCS registers where the arguments need to end up. | 4874 // The PCS registers where the arguments need to end up. |
| (...skipping 121 matching lines...) | |
| 4924 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); | 4996 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); |
| 4925 #endif | 4997 #endif |
| 4926 } | 4998 } |
| 4927 | 4999 |
| 4928 | 5000 |
| 4929 void MacroAssembler::Printf(const char * format, | 5001 void MacroAssembler::Printf(const char * format, |
| 4930 const CPURegister& arg0, | 5002 const CPURegister& arg0, |
| 4931 const CPURegister& arg1, | 5003 const CPURegister& arg1, |
| 4932 const CPURegister& arg2, | 5004 const CPURegister& arg2, |
| 4933 const CPURegister& arg3) { | 5005 const CPURegister& arg3) { |
| 5006 // Printf is expected to preserve all registers, so make sure that none are | |
| 5007 // available as scratch registers until we've preserved them. | |
| 5008 UseScratchRegisterScope exclude_all(this); | |
| 5009 exclude_all.ExcludeAll(); | |
|
rmcilroy
2014/02/24 11:04:49
Can't we just simplify this to assert that arg0, a
jbramley
2014/02/24 11:59:27
Perhaps, but this way allows us to call Printf on
| |
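A hedged illustration of the point made in the reply above (the call site below is hypothetical and not part of this patch): Printf may be asked to print a value that lives in a register the pool would normally hand out, so every scratch register is excluded before anything is preserved, and only the just-saved caller-saved registers (minus the arguments) are re-included.

```cpp
// Hypothetical call site, assuming the usual printf-style format handling:
// printing ip0 itself must not let ip0 be reused as a temp beforehand.
__ Printf("ip0 = 0x%016" PRIx64 "\n", ip0);
```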
| 5010 | |
| 4934 // Preserve all caller-saved registers as well as NZCV. | 5011 // Preserve all caller-saved registers as well as NZCV. |
| 4935 // If csp is the stack pointer, PushCPURegList asserts that the size of each | 5012 // If csp is the stack pointer, PushCPURegList asserts that the size of each |
| 4936 // list is a multiple of 16 bytes. | 5013 // list is a multiple of 16 bytes. |
| 4937 PushCPURegList(kCallerSaved); | 5014 PushCPURegList(kCallerSaved); |
| 4938 PushCPURegList(kCallerSavedFP); | 5015 PushCPURegList(kCallerSavedFP); |
| 4939 // Use Tmp0() as a scratch register. It is not accepted by Printf so it will | |
| 4940 // never overlap an argument register. | |
| 4941 Mrs(Tmp0(), NZCV); | |
| 4942 Push(Tmp0(), xzr); | |
| 4943 | 5016 |
| 4944 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | 5017 { UseScratchRegisterScope temps(this); |
| 5018 // We can use caller-saved registers as scratch values (except for argN). | |
| 5019 TmpList()->Combine(kCallerSaved); | |
| 5020 FPTmpList()->Combine(kCallerSavedFP); | |
| 5021 temps.Exclude(arg0, arg1, arg2, arg3); | |
| 4945 | 5022 |
| 4946 Pop(xzr, Tmp0()); | 5023 // Preserve NZCV. |
| 4947 Msr(NZCV, Tmp0()); | 5024 Register tmp = temps.AcquireX(); |
| 5025 Mrs(tmp, NZCV); | |
| 5026 Push(tmp, xzr); | |
| 5027 | |
| 5028 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | |
| 5029 | |
| 5030 Pop(xzr, tmp); | |
| 5031 Msr(NZCV, tmp); | |
| 5032 } | |
| 5033 | |
| 4948 PopCPURegList(kCallerSavedFP); | 5034 PopCPURegList(kCallerSavedFP); |
| 4949 PopCPURegList(kCallerSaved); | 5035 PopCPURegList(kCallerSaved); |
| 4950 } | 5036 } |
| 4951 | 5037 |
| 4952 | 5038 |
| 4953 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { | 5039 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { |
| 4954 // TODO(jbramley): Other architectures use the internal memcpy to copy the | 5040 // TODO(jbramley): Other architectures use the internal memcpy to copy the |
| 4955 // sequence. If this is a performance bottleneck, we should consider caching | 5041 // sequence. If this is a performance bottleneck, we should consider caching |
| 4956 // the sequence and copying it in the same way. | 5042 // the sequence and copying it in the same way. |
| 4957 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); | 5043 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); |
| (...skipping 87 matching lines...) | |
| 5045 PatchingAssembler patcher(old, length); | 5131 PatchingAssembler patcher(old, length); |
| 5046 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); | 5132 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL); |
| 5047 initialized = true; | 5133 initialized = true; |
| 5048 } | 5134 } |
| 5049 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; | 5135 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; |
| 5050 } | 5136 } |
| 5051 #endif | 5137 #endif |
| 5052 | 5138 |
| 5053 | 5139 |
| 5054 #undef __ | 5140 #undef __ |
| 5141 | |
| 5142 | |
| 5143 UseScratchRegisterScope::~UseScratchRegisterScope() { | |
| 5144 available_->set_list(old_available_); | |
| 5145 availablefp_->set_list(old_availablefp_); | |
| 5146 } | |
| 5147 | |
| 5148 | |
| 5149 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { | |
| 5150 int code = AcquireNextAvailable(available_).code(); | |
| 5151 return Register::Create(code, reg.SizeInBits()); | |
| 5152 } | |
| 5153 | |
| 5154 | |
| 5155 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { | |
| 5156 int code = AcquireNextAvailable(availablefp_).code(); | |
| 5157 return FPRegister::Create(code, reg.SizeInBits()); | |
| 5158 } | |
| 5159 | |
| 5160 | |
| 5161 void UseScratchRegisterScope::Release(const CPURegister& reg) { | |
| 5162 if (reg.IsRegister()) { | |
| 5163 ReleaseByCode(available_, reg.code()); | |
| 5164 } else if (reg.IsFPRegister()) { | |
| 5165 ReleaseByCode(availablefp_, reg.code()); | |
| 5166 } else { | |
| 5167 ASSERT(reg.IsNone()); | |
| 5168 } | |
| 5169 } | |
| 5170 | |
| 5171 | |
| 5172 void UseScratchRegisterScope::Include(const CPURegList& regs) { | |
|
rmcilroy
2014/02/24 11:04:49
Having had another look through the code, it seems…
jbramley
2014/02/24 11:59:27
They're useful in tests (such as Dump), but there…
rmcilroy
2014/02/24 13:12:49
The accidental use-case I'm thinking of is somethi…
| |
| 5173 RegList include = regs.list(); | |
| 5174 CPURegister::RegisterType type = regs.type(); | |
| 5175 | |
| 5176 if (type == CPURegister::kRegister) { | |
| 5177 // Make sure that neither csp nor xzr is included in the list. | |
| 5178 include &= ~(xzr.Bit() | csp.Bit()); | |
|
rmcilroy
2014/02/20 13:06:00
Maybe add an assert here too to warn if someone tr…
jbramley
2014/02/20 13:23:12
My reasoning was that in several MacroAssembler op…
rmcilroy
2014/02/24 11:04:49
As outlined above, I would like it if this method…
| |
| 5179 IncludeByRegList(available_, include); | |
| 5180 } else if (type == CPURegister::kFPRegister) { | |
| 5181 IncludeByRegList(availablefp_, include); | |
| 5182 } else { | |
| 5183 ASSERT(type == CPURegister::kNoRegister); | |
| 5184 } | |
| 5185 } | |
| 5186 | |
| 5187 | |
| 5188 void UseScratchRegisterScope::Include(const Register& reg1, | |
| 5189 const Register& reg2, | |
| 5190 const Register& reg3, | |
| 5191 const Register& reg4) { | |
| 5192 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); | |
| 5193 // Make sure that neither csp nor xzr is included in the list. | |
| 5194 include &= ~(xzr.Bit() | csp.Bit()); | |
| 5195 | |
| 5196 IncludeByRegList(available_, include); | |
| 5197 } | |
| 5198 | |
| 5199 | |
| 5200 void UseScratchRegisterScope::Include(const FPRegister& reg1, | |
| 5201 const FPRegister& reg2, | |
| 5202 const FPRegister& reg3, | |
| 5203 const FPRegister& reg4) { | |
| 5204 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); | |
| 5205 IncludeByRegList(availablefp_, include); | |
| 5206 } | |
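The Include overloads widen the pools for the remainder of the scope, and the destructor undoes the change; note that only the CPURegList and Register overloads filter out csp and xzr. The sketch below assumes x8..x11 and d8..d11 hold no live values at this point; the specific registers are arbitrary picks for illustration.

```cpp
// Sketch only: temporarily widen the scratch pools.
UseScratchRegisterScope temps(this);
temps.Include(x8, x9, x10, x11);   // extra integer scratch registers
temps.Include(d8, d9, d10, d11);   // extra FP scratch registers
Register tmp = temps.AcquireX();
FPRegister ftmp = temps.AcquireSameSizeAs(d0);
// Both pools shrink back to their original contents at the end of the scope.
```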
| 5207 | |
| 5208 | |
| 5209 void UseScratchRegisterScope::Exclude(const Register& reg1, | |
| 5210 const Register& reg2, | |
| 5211 const Register& reg3, | |
| 5212 const Register& reg4) { | |
| 5213 RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); | |
| 5214 ExcludeByRegList(available_, exclude); | |
| 5215 } | |
| 5216 | |
| 5217 | |
| 5218 void UseScratchRegisterScope::Exclude(const FPRegister& reg1, | |
| 5219 const FPRegister& reg2, | |
| 5220 const FPRegister& reg3, | |
| 5221 const FPRegister& reg4) { | |
| 5222 RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit(); | |
| 5223 ExcludeByRegList(availablefp_, excludefp); | |
| 5224 } | |
| 5225 | |
| 5226 | |
| 5227 void UseScratchRegisterScope::Exclude(const CPURegister& reg1, | |
| 5228 const CPURegister& reg2, | |
| 5229 const CPURegister& reg3, | |
| 5230 const CPURegister& reg4) { | |
| 5231 RegList exclude = 0; | |
| 5232 RegList excludefp = 0; | |
| 5233 | |
| 5234 const CPURegister regs[] = {reg1, reg2, reg3, reg4}; | |
| 5235 | |
| 5236 for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) { | |
| 5237 if (regs[i].IsRegister()) { | |
| 5238 exclude |= regs[i].Bit(); | |
| 5239 } else if (regs[i].IsFPRegister()) { | |
| 5240 excludefp |= regs[i].Bit(); | |
| 5241 } else { | |
| 5242 ASSERT(regs[i].IsNone()); | |
| 5243 } | |
| 5244 } | |
| 5245 | |
| 5246 ExcludeByRegList(available_, exclude); | |
| 5247 ExcludeByRegList(availablefp_, excludefp); | |
| 5248 } | |
| 5249 | |
| 5250 | |
| 5251 void UseScratchRegisterScope::ExcludeAll() { | |
| 5252 ExcludeByRegList(available_, available_->list()); | |
| 5253 ExcludeByRegList(availablefp_, availablefp_->list()); | |
| 5254 } | |
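Exclude removes registers from the pools so they can never be handed out inside the scope, which is exactly what Printf does for its argument registers above, and ExcludeAll empties both pools for code that must not allocate any scratch register at all. A sketch:

```cpp
// Sketch only: protect live registers from being used as scratch values.
UseScratchRegisterScope temps(this);
temps.Exclude(x0, x1, x2, x3);    // e.g. live argument registers
// temps.ExcludeAll();            // or forbid scratch allocation entirely
Register tmp = temps.AcquireX();  // guaranteed not to alias x0-x3
```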
| 5255 | |
| 5256 | |
| 5257 CPURegister UseScratchRegisterScope::AcquireNextAvailable( | |
| 5258 CPURegList* available) { | |
| 5259 CHECK(!available->IsEmpty()); | |
| 5260 CPURegister result = available->PopLowestIndex(); | |
| 5261 ASSERT(!AreAliased(result, xzr, csp)); | |
| 5262 return result; | |
| 5263 } | |
| 5264 | |
| 5265 | |
| 5266 void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) { | |
| 5267 ReleaseByRegList(available, static_cast<RegList>(1) << code); | |
| 5268 } | |
| 5269 | |
| 5270 | |
| 5271 void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available, | |
| 5272 RegList regs) { | |
| 5273 available->set_list(available->list() | regs); | |
| 5274 } | |
| 5275 | |
| 5276 | |
| 5277 void UseScratchRegisterScope::IncludeByRegList(CPURegList* available, | |
| 5278 RegList regs) { | |
| 5279 available->set_list(available->list() | regs); | |
| 5280 } | |
| 5281 | |
| 5282 | |
| 5283 void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available, | |
| 5284 RegList exclude) { | |
| 5285 available->set_list(available->list() & ~exclude); | |
| 5286 } | |
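All of these helpers treat the available lists as plain RegList bitmasks, one bit per register code: release and include OR bits in, exclude ANDs them out. The sketch below performs the same set operations directly on a mask; the codes 16 and 17 assume ip0/ip1 correspond to x16/x17, as is conventional on A64.

```cpp
// Sketch only: the bit manipulation behind IncludeByRegList,
// ExcludeByRegList and ReleaseByRegList.
RegList available = 0;
available |= (static_cast<RegList>(1) << 16) |   // include ip0 (x16)
             (static_cast<RegList>(1) << 17);    // include ip1 (x17)
available &= ~(static_cast<RegList>(1) << 16);   // exclude ip0 from the pool
available |= (static_cast<RegList>(1) << 16);    // release ip0 back again
```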
| 5287 | |
| 5288 | |
| 5055 #define __ masm-> | 5289 #define __ masm-> |
| 5056 | 5290 |
| 5057 | 5291 |
| 5058 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, | 5292 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, |
| 5059 const Label* smi_check) { | 5293 const Label* smi_check) { |
| 5060 Assembler::BlockConstPoolScope scope(masm); | 5294 Assembler::BlockConstPoolScope scope(masm); |
| 5061 if (reg.IsValid()) { | 5295 if (reg.IsValid()) { |
| 5062 ASSERT(smi_check->is_bound()); | 5296 ASSERT(smi_check->is_bound()); |
| 5063 ASSERT(reg.Is64Bits()); | 5297 ASSERT(reg.Is64Bits()); |
| 5064 | 5298 |
| (...skipping 31 matching lines...) | |
| 5096 } | 5330 } |
| 5097 } | 5331 } |
| 5098 | 5332 |
| 5099 | 5333 |
| 5100 #undef __ | 5334 #undef __ |
| 5101 | 5335 |
| 5102 | 5336 |
| 5103 } } // namespace v8::internal | 5337 } } // namespace v8::internal |
| 5104 | 5338 |
| 5105 #endif // V8_TARGET_ARCH_A64 | 5339 #endif // V8_TARGET_ARCH_A64 |