OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 35 matching lines...)
46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, | 46 MacroAssembler::MacroAssembler(Isolate* arg_isolate, |
47 byte * buffer, | 47 byte * buffer, |
48 unsigned buffer_size) | 48 unsigned buffer_size) |
49 : Assembler(arg_isolate, buffer, buffer_size), | 49 : Assembler(arg_isolate, buffer, buffer_size), |
50 generating_stub_(false), | 50 generating_stub_(false), |
51 #if DEBUG | 51 #if DEBUG |
52 allow_macro_instructions_(true), | 52 allow_macro_instructions_(true), |
53 #endif | 53 #endif |
54 has_frame_(false), | 54 has_frame_(false), |
55 use_real_aborts_(true), | 55 use_real_aborts_(true), |
56 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) { | 56 sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) { |
57 if (isolate() != NULL) { | 57 if (isolate() != NULL) { |
58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
59 isolate()); | 59 isolate()); |
60 } | 60 } |
61 } | 61 } |
62 | 62 |
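Note: the constructor change above replaces the fixed Tmp0()/Tmp1()/fptmp0_ registers with scratch lists (tmp_list_, fptmp_list_) that every use site below draws from through a UseScratchRegisterScope. A minimal usage sketch, assuming the VIXL-style scope API whose calls (AcquireX, AcquireSameSizeAs) appear throughout this patch:

    {
      UseScratchRegisterScope temps(this);          // snapshots the scratch lists
      Register temp = temps.AcquireX();             // e.g. ip0 or ip1 from tmp_list_
      Register same = temps.AcquireSameSizeAs(rn);  // W or X register, matching rn
      // ... use temp and same ...
    }  // the destructor returns both registers to the lists
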
63 | 63 |
64 void MacroAssembler::LogicalMacro(const Register& rd, | 64 void MacroAssembler::LogicalMacro(const Register& rd, |
65 const Register& rn, | 65 const Register& rn, |
66 const Operand& operand, | 66 const Operand& operand, |
67 LogicalOp op) { | 67 LogicalOp op) { |
| 68 UseScratchRegisterScope temps(this); |
| 69 |
68 if (operand.NeedsRelocation()) { | 70 if (operand.NeedsRelocation()) { |
69 LoadRelocated(Tmp0(), operand); | 71 Register temp = temps.AcquireX(); |
70 Logical(rd, rn, Tmp0(), op); | 72 LoadRelocated(temp, operand); |
| 73 Logical(rd, rn, temp, op); |
71 | 74 |
72 } else if (operand.IsImmediate()) { | 75 } else if (operand.IsImmediate()) { |
73 int64_t immediate = operand.immediate(); | 76 int64_t immediate = operand.immediate(); |
74 unsigned reg_size = rd.SizeInBits(); | 77 unsigned reg_size = rd.SizeInBits(); |
75 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 78 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
76 | 79 |
77 // If the operation is NOT, invert the operation and immediate. | 80 // If the operation is NOT, invert the operation and immediate. |
78 if ((op & NOT) == NOT) { | 81 if ((op & NOT) == NOT) { |
79 op = static_cast<LogicalOp>(op & ~NOT); | 82 op = static_cast<LogicalOp>(op & ~NOT); |
80 immediate = ~immediate; | 83 immediate = ~immediate; |
(...skipping 37 matching lines...)
118 UNREACHABLE(); | 121 UNREACHABLE(); |
119 } | 122 } |
120 } | 123 } |
121 | 124 |
122 unsigned n, imm_s, imm_r; | 125 unsigned n, imm_s, imm_r; |
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | 126 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
124 // Immediate can be encoded in the instruction. | 127 // Immediate can be encoded in the instruction. |
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 128 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
126 } else { | 129 } else { |
127 // Immediate can't be encoded: synthesize using move immediate. | 130 // Immediate can't be encoded: synthesize using move immediate. |
128 Register temp = AppropriateTempFor(rn); | 131 Register temp = temps.AcquireSameSizeAs(rn); |
129 Mov(temp, immediate); | 132 Mov(temp, immediate); |
130 if (rd.Is(csp)) { | 133 if (rd.Is(csp)) { |
131 // If rd is the stack pointer we cannot use it as the destination | 134 // If rd is the stack pointer we cannot use it as the destination |
132 // register so we use the temp register as an intermediate again. | 135 // register so we use the temp register as an intermediate again. |
133 Logical(temp, rn, temp, op); | 136 Logical(temp, rn, temp, op); |
134 Mov(csp, temp); | 137 Mov(csp, temp); |
135 } else { | 138 } else { |
136 Logical(rd, rn, temp, op); | 139 Logical(rd, rn, temp, op); |
137 } | 140 } |
138 } | 141 } |
139 | 142 |
140 } else if (operand.IsExtendedRegister()) { | 143 } else if (operand.IsExtendedRegister()) { |
141 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 144 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
142 // Add/sub extended supports shift <= 4. We want to support exactly the | 145 // Add/sub extended supports shift <= 4. We want to support exactly the |
143 // same modes here. | 146 // same modes here. |
144 ASSERT(operand.shift_amount() <= 4); | 147 ASSERT(operand.shift_amount() <= 4); |
145 ASSERT(operand.reg().Is64Bits() || | 148 ASSERT(operand.reg().Is64Bits() || |
146 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 149 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
147 Register temp = AppropriateTempFor(rn, operand.reg()); | 150 Register temp = temps.AcquireSameSizeAs(rn); |
148 EmitExtendShift(temp, operand.reg(), operand.extend(), | 151 EmitExtendShift(temp, operand.reg(), operand.extend(), |
149 operand.shift_amount()); | 152 operand.shift_amount()); |
150 Logical(rd, rn, temp, op); | 153 Logical(rd, rn, temp, op); |
151 | 154 |
152 } else { | 155 } else { |
153 // The operand can be encoded in the instruction. | 156 // The operand can be encoded in the instruction. |
154 ASSERT(operand.IsShiftedRegister()); | 157 ASSERT(operand.IsShiftedRegister()); |
155 Logical(rd, rn, operand, op); | 158 Logical(rd, rn, operand, op); |
156 } | 159 } |
157 } | 160 } |
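Note: the NOT-handling near the top of LogicalMacro rewrites an inverted logical op into its plain counterpart with an inverted immediate; illustratively (not part of the patch):

    // Bic(x0, x1, 0xff)  is handled as  And(x0, x1, ~0xff)   (BIC == AND-NOT)
    // Orn(x0, x1, 0xff)  is handled as  Orr(x0, x1, ~0xff)   (ORN == ORR-NOT)
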
(...skipping 43 matching lines...)
201 uint64_t ignored_halfword = 0; | 204 uint64_t ignored_halfword = 0; |
202 bool invert_move = false; | 205 bool invert_move = false; |
203 // If the number of 0xffff halfwords is greater than the number of 0x0000 | 206 // If the number of 0xffff halfwords is greater than the number of 0x0000 |
204 // halfwords, it's more efficient to use move-inverted. | 207 // halfwords, it's more efficient to use move-inverted. |
205 if (CountClearHalfWords(~imm, reg_size) > | 208 if (CountClearHalfWords(~imm, reg_size) > |
206 CountClearHalfWords(imm, reg_size)) { | 209 CountClearHalfWords(imm, reg_size)) { |
207 ignored_halfword = 0xffffL; | 210 ignored_halfword = 0xffffL; |
208 invert_move = true; | 211 invert_move = true; |
209 } | 212 } |
210 | 213 |
211 // Mov instructions can't move value into the stack pointer, so set up a | 214 // Mov instructions can't move immediate values into the stack pointer, so |
212 // temporary register, if needed. | 215 // set up a temporary register, if needed. |
213 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd; | 216 UseScratchRegisterScope temps(this); |
| 217 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; |
214 | 218 |
215 // Iterate through the halfwords. Use movn/movz for the first non-ignored | 219 // Iterate through the halfwords. Use movn/movz for the first non-ignored |
216 // halfword, and movk for subsequent halfwords. | 220 // halfword, and movk for subsequent halfwords. |
217 ASSERT((reg_size % 16) == 0); | 221 ASSERT((reg_size % 16) == 0); |
218 bool first_mov_done = false; | 222 bool first_mov_done = false; |
219 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { | 223 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { |
220 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; | 224 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; |
221 if (imm16 != ignored_halfword) { | 225 if (imm16 != ignored_halfword) { |
222 if (!first_mov_done) { | 226 if (!first_mov_done) { |
223 if (invert_move) { | 227 if (invert_move) { |
(...skipping 17 matching lines...)
241 } | 245 } |
242 } | 246 } |
243 } | 247 } |
244 | 248 |
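Note: the halfword loop above synthesizes a 64-bit immediate in at most four instructions (one movz or movn for the first non-ignored halfword, then movk per remaining halfword). A hypothetical trace, ignoring the one-instruction shortcuts elided earlier in this function:

    // Mov(x0, 0x0000ffff0000abcdUL): two 0x0000 halfwords but only one
    // 0xffff halfword, so the plain movz path is chosen:
    //   movz x0, #0xabcd, lsl #0
    //   movk x0, #0xffff, lsl #32
    // Mov(x0, 0xffffffffffff1234UL): three 0xffff halfwords, so the
    // move-inverted path is chosen:
    //   movn x0, #0xedcb, lsl #0   // ~0xedcb == 0xffffffffffff1234
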
245 | 249 |
246 void MacroAssembler::Mov(const Register& rd, | 250 void MacroAssembler::Mov(const Register& rd, |
247 const Operand& operand, | 251 const Operand& operand, |
248 DiscardMoveMode discard_mode) { | 252 DiscardMoveMode discard_mode) { |
249 ASSERT(allow_macro_instructions_); | 253 ASSERT(allow_macro_instructions_); |
250 ASSERT(!rd.IsZero()); | 254 ASSERT(!rd.IsZero()); |
| 255 |
251 // Provide a swap register for instructions that need to write into the | 256 // Provide a swap register for instructions that need to write into the |
252 // system stack pointer (and can't do this inherently). | 257 // system stack pointer (and can't do this inherently). |
253 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd); | 258 UseScratchRegisterScope temps(this); |
| 259 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; |
254 | 260 |
255 if (operand.NeedsRelocation()) { | 261 if (operand.NeedsRelocation()) { |
256 LoadRelocated(dst, operand); | 262 LoadRelocated(dst, operand); |
257 | 263 |
258 } else if (operand.IsImmediate()) { | 264 } else if (operand.IsImmediate()) { |
259 // Call the macro assembler for generic immediates. | 265 // Call the macro assembler for generic immediates. |
260 Mov(dst, operand.immediate()); | 266 Mov(dst, operand.immediate()); |
261 | 267 |
262 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 268 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
263 // Emit a shift instruction if moving a shifted register. This operation | 269 // Emit a shift instruction if moving a shifted register. This operation |
(...skipping 20 matching lines...)
284 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && | 290 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && |
285 (discard_mode == kDontDiscardForSameWReg))) { | 291 (discard_mode == kDontDiscardForSameWReg))) { |
286 Assembler::mov(rd, operand.reg()); | 292 Assembler::mov(rd, operand.reg()); |
287 } | 293 } |
288 // This case can handle writes into the system stack pointer directly. | 294 // This case can handle writes into the system stack pointer directly. |
289 dst = rd; | 295 dst = rd; |
290 } | 296 } |
291 | 297 |
292 // Copy the result to the system stack pointer. | 298 // Copy the result to the system stack pointer. |
293 if (!dst.Is(rd)) { | 299 if (!dst.Is(rd)) { |
294 ASSERT(rd.IsZero()); | 300 ASSERT(rd.IsSP()); |
295 ASSERT(dst.Is(Tmp1())); | |
296 Assembler::mov(rd, dst); | 301 Assembler::mov(rd, dst); |
297 } | 302 } |
298 } | 303 } |
299 | 304 |
300 | 305 |
301 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 306 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
302 ASSERT(allow_macro_instructions_); | 307 ASSERT(allow_macro_instructions_); |
303 | 308 |
304 if (operand.NeedsRelocation()) { | 309 if (operand.NeedsRelocation()) { |
305 LoadRelocated(Tmp0(), operand); | 310 LoadRelocated(rd, operand); |
306 Mvn(rd, Tmp0()); | 311 mvn(rd, rd); |
307 | 312 |
308 } else if (operand.IsImmediate()) { | 313 } else if (operand.IsImmediate()) { |
309 // Call the macro assembler for generic immediates. | 314 // Call the macro assembler for generic immediates. |
310 Mov(rd, ~operand.immediate()); | 315 Mov(rd, ~operand.immediate()); |
311 | 316 |
312 } else if (operand.IsExtendedRegister()) { | 317 } else if (operand.IsExtendedRegister()) { |
313 // Emit two instructions for the extend case. This differs from Mov, as | 318 // Emit two instructions for the extend case. This differs from Mov, as |
314 // the extend and invert can't be achieved in one instruction. | 319 // the extend and invert can't be achieved in one instruction. |
315 Register temp = AppropriateTempFor(rd, operand.reg()); | 320 EmitExtendShift(rd, operand.reg(), operand.extend(), |
316 EmitExtendShift(temp, operand.reg(), operand.extend(), | |
317 operand.shift_amount()); | 321 operand.shift_amount()); |
318 mvn(rd, temp); | 322 mvn(rd, rd); |
319 | 323 |
320 } else { | 324 } else { |
321 // Otherwise, emit a register move only if the registers are distinct. | |
322 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0. | |
323 mvn(rd, operand); | 325 mvn(rd, operand); |
324 } | 326 } |
325 } | 327 } |
326 | 328 |
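Note: dropping the scratch register in Mvn's extended case works because rd can hold the intermediate extend result itself; for example (assuming EmitExtendShift lowers a plain SXTW to sxtw here):

    // Mvn(x0, Operand(w1, SXTW)) now expands as:
    //   sxtw x0, w1   // extend written straight into rd
    //   mvn  x0, x0   // then inverted in place
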
327 | 329 |
328 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { | 330 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { |
329 ASSERT((reg_size % 8) == 0); | 331 ASSERT((reg_size % 8) == 0); |
330 int count = 0; | 332 int count = 0; |
331 for (unsigned i = 0; i < (reg_size / 16); i++) { | 333 for (unsigned i = 0; i < (reg_size / 16); i++) { |
332 if ((imm & 0xffff) == 0) { | 334 if ((imm & 0xffff) == 0) { |
(...skipping 20 matching lines...)
353 } | 355 } |
354 | 356 |
355 | 357 |
356 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | 358 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
357 const Operand& operand, | 359 const Operand& operand, |
358 StatusFlags nzcv, | 360 StatusFlags nzcv, |
359 Condition cond, | 361 Condition cond, |
360 ConditionalCompareOp op) { | 362 ConditionalCompareOp op) { |
361 ASSERT((cond != al) && (cond != nv)); | 363 ASSERT((cond != al) && (cond != nv)); |
362 if (operand.NeedsRelocation()) { | 364 if (operand.NeedsRelocation()) { |
363 LoadRelocated(Tmp0(), operand); | 365 UseScratchRegisterScope temps(this); |
364 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op); | 366 Register temp = temps.AcquireX(); |
| 367 LoadRelocated(temp, operand); |
| 368 ConditionalCompareMacro(rn, temp, nzcv, cond, op); |
365 | 369 |
366 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | 370 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
367 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | 371 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
368 // The immediate can be encoded in the instruction, or the operand is an | 372 // The immediate can be encoded in the instruction, or the operand is an |
369 // unshifted register: call the assembler. | 373 // unshifted register: call the assembler. |
370 ConditionalCompare(rn, operand, nzcv, cond, op); | 374 ConditionalCompare(rn, operand, nzcv, cond, op); |
371 | 375 |
372 } else { | 376 } else { |
373 // The operand isn't directly supported by the instruction: perform the | 377 // The operand isn't directly supported by the instruction: perform the |
374 // operation on a temporary register. | 378 // operation on a temporary register. |
375 Register temp = AppropriateTempFor(rn); | 379 UseScratchRegisterScope temps(this); |
| 380 Register temp = temps.AcquireSameSizeAs(rn); |
376 Mov(temp, operand); | 381 Mov(temp, operand); |
377 ConditionalCompare(rn, temp, nzcv, cond, op); | 382 ConditionalCompare(rn, temp, nzcv, cond, op); |
378 } | 383 } |
379 } | 384 } |
380 | 385 |
381 | 386 |
382 void MacroAssembler::Csel(const Register& rd, | 387 void MacroAssembler::Csel(const Register& rd, |
383 const Register& rn, | 388 const Register& rn, |
384 const Operand& operand, | 389 const Operand& operand, |
385 Condition cond) { | 390 Condition cond) { |
386 ASSERT(allow_macro_instructions_); | 391 ASSERT(allow_macro_instructions_); |
387 ASSERT(!rd.IsZero()); | 392 ASSERT(!rd.IsZero()); |
388 ASSERT((cond != al) && (cond != nv)); | 393 ASSERT((cond != al) && (cond != nv)); |
389 if (operand.IsImmediate()) { | 394 if (operand.IsImmediate()) { |
390 // Immediate argument. Handle special cases of 0, 1 and -1 using zero | 395 // Immediate argument. Handle special cases of 0, 1 and -1 using zero |
391 // register. | 396 // register. |
392 int64_t imm = operand.immediate(); | 397 int64_t imm = operand.immediate(); |
393 Register zr = AppropriateZeroRegFor(rn); | 398 Register zr = AppropriateZeroRegFor(rn); |
394 if (imm == 0) { | 399 if (imm == 0) { |
395 csel(rd, rn, zr, cond); | 400 csel(rd, rn, zr, cond); |
396 } else if (imm == 1) { | 401 } else if (imm == 1) { |
397 csinc(rd, rn, zr, cond); | 402 csinc(rd, rn, zr, cond); |
398 } else if (imm == -1) { | 403 } else if (imm == -1) { |
399 csinv(rd, rn, zr, cond); | 404 csinv(rd, rn, zr, cond); |
400 } else { | 405 } else { |
401 Register temp = AppropriateTempFor(rn); | 406 UseScratchRegisterScope temps(this); |
| 407 Register temp = temps.AcquireSameSizeAs(rn); |
402 Mov(temp, operand.immediate()); | 408 Mov(temp, operand.immediate()); |
403 csel(rd, rn, temp, cond); | 409 csel(rd, rn, temp, cond); |
404 } | 410 } |
405 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { | 411 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) { |
406 // Unshifted register argument. | 412 // Unshifted register argument. |
407 csel(rd, rn, operand.reg(), cond); | 413 csel(rd, rn, operand.reg(), cond); |
408 } else { | 414 } else { |
409 // All other arguments. | 415 // All other arguments. |
410 Register temp = AppropriateTempFor(rn); | 416 UseScratchRegisterScope temps(this); |
| 417 Register temp = temps.AcquireSameSizeAs(rn); |
411 Mov(temp, operand); | 418 Mov(temp, operand); |
412 csel(rd, rn, temp, cond); | 419 csel(rd, rn, temp, cond); |
413 } | 420 } |
414 } | 421 } |
415 | 422 |
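Note: the 0, 1 and -1 special cases in Csel exploit what csel, csinc and csinv compute against the zero register when the condition fails; illustratively:

    // Csel(x0, x1,  0, cond)  ->  csel (x0, x1, xzr, cond)  // else:  zr ==  0
    // Csel(x0, x1,  1, cond)  ->  csinc(x0, x1, xzr, cond)  // else: zr+1 ==  1
    // Csel(x0, x1, -1, cond)  ->  csinv(x0, x1, xzr, cond)  // else: ~zr == -1
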
416 | 423 |
417 void MacroAssembler::AddSubMacro(const Register& rd, | 424 void MacroAssembler::AddSubMacro(const Register& rd, |
418 const Register& rn, | 425 const Register& rn, |
419 const Operand& operand, | 426 const Operand& operand, |
420 FlagsUpdate S, | 427 FlagsUpdate S, |
421 AddSubOp op) { | 428 AddSubOp op) { |
422 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 429 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
423 !operand.NeedsRelocation() && (S == LeaveFlags)) { | 430 !operand.NeedsRelocation() && (S == LeaveFlags)) { |
424 // The instruction would be a nop. Avoid generating useless code. | 431 // The instruction would be a nop. Avoid generating useless code. |
425 return; | 432 return; |
426 } | 433 } |
427 | 434 |
428 if (operand.NeedsRelocation()) { | 435 if (operand.NeedsRelocation()) { |
429 LoadRelocated(Tmp0(), operand); | 436 UseScratchRegisterScope temps(this); |
430 AddSubMacro(rd, rn, Tmp0(), S, op); | 437 Register temp = temps.AcquireX(); |
| 438 LoadRelocated(temp, operand); |
| 439 AddSubMacro(rd, rn, temp, S, op); |
431 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | 440 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
432 (rn.IsZero() && !operand.IsShiftedRegister()) || | 441 (rn.IsZero() && !operand.IsShiftedRegister()) || |
433 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 442 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
434 Register temp = AppropriateTempFor(rn); | 443 UseScratchRegisterScope temps(this); |
| 444 Register temp = temps.AcquireSameSizeAs(rn); |
435 Mov(temp, operand); | 445 Mov(temp, operand); |
436 AddSub(rd, rn, temp, S, op); | 446 AddSub(rd, rn, temp, S, op); |
437 } else { | 447 } else { |
438 AddSub(rd, rn, operand, S, op); | 448 AddSub(rd, rn, operand, S, op); |
439 } | 449 } |
440 } | 450 } |
441 | 451 |
442 | 452 |
443 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 453 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
444 const Register& rn, | 454 const Register& rn, |
445 const Operand& operand, | 455 const Operand& operand, |
446 FlagsUpdate S, | 456 FlagsUpdate S, |
447 AddSubWithCarryOp op) { | 457 AddSubWithCarryOp op) { |
448 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 458 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 459 UseScratchRegisterScope temps(this); |
449 | 460 |
450 if (operand.NeedsRelocation()) { | 461 if (operand.NeedsRelocation()) { |
451 LoadRelocated(Tmp0(), operand); | 462 Register temp = temps.AcquireX(); |
452 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op); | 463 LoadRelocated(temp, operand); |
| 464 AddSubWithCarryMacro(rd, rn, temp, S, op); |
453 | 465 |
454 } else if (operand.IsImmediate() || | 466 } else if (operand.IsImmediate() || |
455 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 467 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
456 // Add/sub with carry (immediate or ROR shifted register). | 468 // Add/sub with carry (immediate or ROR shifted register). |
457 Register temp = AppropriateTempFor(rn); | 469 Register temp = temps.AcquireSameSizeAs(rn); |
458 Mov(temp, operand); | 470 Mov(temp, operand); |
459 AddSubWithCarry(rd, rn, temp, S, op); | 471 AddSubWithCarry(rd, rn, temp, S, op); |
| 472 |
460 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 473 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
461 // Add/sub with carry (shifted register). | 474 // Add/sub with carry (shifted register). |
462 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 475 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
463 ASSERT(operand.shift() != ROR); | 476 ASSERT(operand.shift() != ROR); |
464 ASSERT(is_uintn(operand.shift_amount(), | 477 ASSERT( |
465 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); | 478 is_uintn(operand.shift_amount(), |
466 Register temp = AppropriateTempFor(rn, operand.reg()); | 479 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); |
| 480 Register temp = temps.AcquireSameSizeAs(rn); |
467 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); | 481 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); |
468 AddSubWithCarry(rd, rn, temp, S, op); | 482 AddSubWithCarry(rd, rn, temp, S, op); |
469 | 483 |
470 } else if (operand.IsExtendedRegister()) { | 484 } else if (operand.IsExtendedRegister()) { |
471 // Add/sub with carry (extended register). | 485 // Add/sub with carry (extended register). |
472 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 486 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
473 // Add/sub extended supports a shift <= 4. We want to support exactly the | 487 // Add/sub extended supports a shift <= 4. We want to support exactly the |
474 // same modes. | 488 // same modes. |
475 ASSERT(operand.shift_amount() <= 4); | 489 ASSERT(operand.shift_amount() <= 4); |
476 ASSERT(operand.reg().Is64Bits() || | 490 ASSERT(operand.reg().Is64Bits() || |
477 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 491 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
478 Register temp = AppropriateTempFor(rn, operand.reg()); | 492 Register temp = temps.AcquireSameSizeAs(rn); |
479 EmitExtendShift(temp, operand.reg(), operand.extend(), | 493 EmitExtendShift(temp, operand.reg(), operand.extend(), |
480 operand.shift_amount()); | 494 operand.shift_amount()); |
481 AddSubWithCarry(rd, rn, temp, S, op); | 495 AddSubWithCarry(rd, rn, temp, S, op); |
482 | 496 |
483 } else { | 497 } else { |
484 // The addressing mode is directly supported by the instruction. | 498 // The addressing mode is directly supported by the instruction. |
485 AddSubWithCarry(rd, rn, operand, S, op); | 499 AddSubWithCarry(rd, rn, operand, S, op); |
486 } | 500 } |
487 } | 501 } |
488 | 502 |
489 | 503 |
490 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, | 504 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, |
491 const MemOperand& addr, | 505 const MemOperand& addr, |
492 LoadStoreOp op) { | 506 LoadStoreOp op) { |
493 int64_t offset = addr.offset(); | 507 int64_t offset = addr.offset(); |
494 LSDataSize size = CalcLSDataSize(op); | 508 LSDataSize size = CalcLSDataSize(op); |
495 | 509 |
496 // Check if an immediate offset fits in the immediate field of the | 510 // Check if an immediate offset fits in the immediate field of the |
497 // appropriate instruction. If not, emit two instructions to perform | 511 // appropriate instruction. If not, emit two instructions to perform |
498 // the operation. | 512 // the operation. |
499 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && | 513 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && |
500 !IsImmLSUnscaled(offset)) { | 514 !IsImmLSUnscaled(offset)) { |
501 // Immediate offset that can't be encoded using unsigned or unscaled | 515 // Immediate offset that can't be encoded using unsigned or unscaled |
502 // addressing modes. | 516 // addressing modes. |
503 Register temp = AppropriateTempFor(addr.base()); | 517 UseScratchRegisterScope temps(this); |
| 518 Register temp = temps.AcquireSameSizeAs(addr.base()); |
504 Mov(temp, addr.offset()); | 519 Mov(temp, addr.offset()); |
505 LoadStore(rt, MemOperand(addr.base(), temp), op); | 520 LoadStore(rt, MemOperand(addr.base(), temp), op); |
506 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { | 521 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { |
507 // Post-index beyond unscaled addressing range. | 522 // Post-index beyond unscaled addressing range. |
508 LoadStore(rt, MemOperand(addr.base()), op); | 523 LoadStore(rt, MemOperand(addr.base()), op); |
509 add(addr.base(), addr.base(), offset); | 524 add(addr.base(), addr.base(), offset); |
510 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { | 525 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { |
511 // Pre-index beyond unscaled addressing range. | 526 // Pre-index beyond unscaled addressing range. |
512 add(addr.base(), addr.base(), offset); | 527 add(addr.base(), addr.base(), offset); |
513 LoadStore(rt, MemOperand(addr.base()), op); | 528 LoadStore(rt, MemOperand(addr.base()), op); |
(...skipping 335 matching lines...)
849 } | 864 } |
850 } | 865 } |
851 | 866 |
852 | 867 |
853 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | 868 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { |
854 int size = src.SizeInBytes(); | 869 int size = src.SizeInBytes(); |
855 | 870 |
856 PrepareForPush(count, size); | 871 PrepareForPush(count, size); |
857 | 872 |
858 if (FLAG_optimize_for_size && count > 8) { | 873 if (FLAG_optimize_for_size && count > 8) { |
| 874 UseScratchRegisterScope temps(this); |
| 875 Register temp = temps.AcquireX(); |
| 876 |
859 Label loop; | 877 Label loop; |
860 __ Mov(Tmp0(), count / 2); | 878 __ Mov(temp, count / 2); |
861 __ Bind(&loop); | 879 __ Bind(&loop); |
862 PushHelper(2, size, src, src, NoReg, NoReg); | 880 PushHelper(2, size, src, src, NoReg, NoReg); |
863 __ Subs(Tmp0(), Tmp0(), 1); | 881 __ Subs(temp, temp, 1); |
864 __ B(ne, &loop); | 882 __ B(ne, &loop); |
865 | 883 |
866 count %= 2; | 884 count %= 2; |
867 } | 885 } |
868 | 886 |
869 // Push up to four registers at a time if possible because if the current | 887 // Push up to four registers at a time if possible because if the current |
870 // stack pointer is csp and the register size is 32, registers must be pushed | 888 // stack pointer is csp and the register size is 32, registers must be pushed |
871 // in blocks of four in order to maintain the 16-byte alignment for csp. | 889 // in blocks of four in order to maintain the 16-byte alignment for csp. |
872 while (count >= 4) { | 890 while (count >= 4) { |
873 PushHelper(4, size, src, src, src, src); | 891 PushHelper(4, size, src, src, src, src); |
874 count -= 4; | 892 count -= 4; |
875 } | 893 } |
876 if (count >= 2) { | 894 if (count >= 2) { |
877 PushHelper(2, size, src, src, NoReg, NoReg); | 895 PushHelper(2, size, src, src, NoReg, NoReg); |
878 count -= 2; | 896 count -= 2; |
879 } | 897 } |
880 if (count == 1) { | 898 if (count == 1) { |
881 PushHelper(1, size, src, NoReg, NoReg, NoReg); | 899 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
882 count -= 1; | 900 count -= 1; |
883 } | 901 } |
884 ASSERT(count == 0); | 902 ASSERT(count == 0); |
885 } | 903 } |
886 | 904 |
887 | 905 |
888 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | 906 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { |
889 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | 907 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); |
890 | 908 |
891 Register temp = AppropriateTempFor(count); | 909 UseScratchRegisterScope temps(this); |
| 910 Register temp = temps.AcquireSameSizeAs(count); |
892 | 911 |
893 if (FLAG_optimize_for_size) { | 912 if (FLAG_optimize_for_size) { |
894 Label loop, done; | 913 Label loop, done; |
895 | 914 |
896 Subs(temp, count, 1); | 915 Subs(temp, count, 1); |
897 B(mi, &done); | 916 B(mi, &done); |
898 | 917 |
899 // Push all registers individually, to save code size. | 918 // Push all registers individually, to save code size. |
900 Bind(&loop); | 919 Bind(&loop); |
901 Subs(temp, temp, 1); | 920 Subs(temp, temp, 1); |
(...skipping 423 matching lines...)
1325 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); | 1344 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); |
1326 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); | 1345 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); |
1327 Br(scratch1); | 1346 Br(scratch1); |
1328 } | 1347 } |
1329 | 1348 |
1330 | 1349 |
1331 void MacroAssembler::InNewSpace(Register object, | 1350 void MacroAssembler::InNewSpace(Register object, |
1332 Condition cond, | 1351 Condition cond, |
1333 Label* branch) { | 1352 Label* branch) { |
1334 ASSERT(cond == eq || cond == ne); | 1353 ASSERT(cond == eq || cond == ne); |
1335 // Use Tmp1() to have a different destination register, as Tmp0() will be used | 1354 UseScratchRegisterScope temps(this); |
1336 // for relocation. | 1355 Register temp = temps.AcquireX(); |
1337 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate()))); | 1356 And(temp, object, Operand(ExternalReference::new_space_mask(isolate()))); |
1338 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate()))); | 1357 Cmp(temp, Operand(ExternalReference::new_space_start(isolate()))); |
1339 B(cond, branch); | 1358 B(cond, branch); |
1340 } | 1359 } |
1341 | 1360 |
1342 | 1361 |
1343 void MacroAssembler::Throw(Register value, | 1362 void MacroAssembler::Throw(Register value, |
1344 Register scratch1, | 1363 Register scratch1, |
1345 Register scratch2, | 1364 Register scratch2, |
1346 Register scratch3, | 1365 Register scratch3, |
1347 Register scratch4) { | 1366 Register scratch4) { |
1348 // Adjust this code if not the case. | 1367 // Adjust this code if not the case. |
(...skipping 148 matching lines...)
1497 | 1516 |
1498 void MacroAssembler::AssertName(Register object) { | 1517 void MacroAssembler::AssertName(Register object) { |
1499 if (emit_debug_code()) { | 1518 if (emit_debug_code()) { |
1500 STATIC_ASSERT(kSmiTag == 0); | 1519 STATIC_ASSERT(kSmiTag == 0); |
1501 // TODO(jbramley): Add AbortIfSmi and related functions. | 1520 // TODO(jbramley): Add AbortIfSmi and related functions. |
1502 Label not_smi; | 1521 Label not_smi; |
1503 JumpIfNotSmi(object, ¬_smi); | 1522 JumpIfNotSmi(object, ¬_smi); |
1504 Abort(kOperandIsASmiAndNotAName); | 1523 Abort(kOperandIsASmiAndNotAName); |
1505 Bind(¬_smi); | 1524 Bind(¬_smi); |
1506 | 1525 |
1507 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset)); | 1526 UseScratchRegisterScope temps(this); |
1508 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE); | 1527 Register temp = temps.AcquireX(); |
| 1528 |
| 1529 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 1530 CompareInstanceType(temp, temp, LAST_NAME_TYPE); |
1509 Check(ls, kOperandIsNotAName); | 1531 Check(ls, kOperandIsNotAName); |
1510 } | 1532 } |
1511 } | 1533 } |
1512 | 1534 |
1513 | 1535 |
1514 void MacroAssembler::AssertString(Register object) { | 1536 void MacroAssembler::AssertString(Register object) { |
1515 if (emit_debug_code()) { | 1537 if (emit_debug_code()) { |
1516 Register temp = Tmp1(); | 1538 UseScratchRegisterScope temps(this); |
| 1539 Register temp = temps.AcquireX(); |
1517 STATIC_ASSERT(kSmiTag == 0); | 1540 STATIC_ASSERT(kSmiTag == 0); |
1518 Tst(object, kSmiTagMask); | 1541 Tst(object, kSmiTagMask); |
1519 Check(ne, kOperandIsASmiAndNotAString); | 1542 Check(ne, kOperandIsASmiAndNotAString); |
1520 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 1543 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
1521 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 1544 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
1522 Check(lo, kOperandIsNotAString); | 1545 Check(lo, kOperandIsNotAString); |
1523 } | 1546 } |
1524 } | 1547 } |
1525 | 1548 |
1526 | 1549 |
(...skipping 296 matching lines...)
1823 | 1846 |
1824 void MacroAssembler::CallCFunction(ExternalReference function, | 1847 void MacroAssembler::CallCFunction(ExternalReference function, |
1825 int num_of_reg_args) { | 1848 int num_of_reg_args) { |
1826 CallCFunction(function, num_of_reg_args, 0); | 1849 CallCFunction(function, num_of_reg_args, 0); |
1827 } | 1850 } |
1828 | 1851 |
1829 | 1852 |
1830 void MacroAssembler::CallCFunction(ExternalReference function, | 1853 void MacroAssembler::CallCFunction(ExternalReference function, |
1831 int num_of_reg_args, | 1854 int num_of_reg_args, |
1832 int num_of_double_args) { | 1855 int num_of_double_args) { |
1833 Mov(Tmp0(), Operand(function)); | 1856 UseScratchRegisterScope temps(this); |
1834 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args); | 1857 Register temp = temps.AcquireX(); |
| 1858 Mov(temp, Operand(function)); |
| 1859 CallCFunction(temp, num_of_reg_args, num_of_double_args); |
1835 } | 1860 } |
1836 | 1861 |
1837 | 1862 |
1838 void MacroAssembler::CallCFunction(Register function, | 1863 void MacroAssembler::CallCFunction(Register function, |
1839 int num_of_reg_args, | 1864 int num_of_reg_args, |
1840 int num_of_double_args) { | 1865 int num_of_double_args) { |
1841 ASSERT(has_frame()); | 1866 ASSERT(has_frame()); |
1842 // We can pass 8 integer arguments in registers. If we need to pass more than | 1867 // We can pass 8 integer arguments in registers. If we need to pass more than |
1843 // that, we'll need to implement support for passing them on the stack. | 1868 // that, we'll need to implement support for passing them on the stack. |
1844 ASSERT(num_of_reg_args <= 8); | 1869 ASSERT(num_of_reg_args <= 8); |
(...skipping 32 matching lines...)
1877 | 1902 |
1878 // Call directly. The function called cannot cause a GC, or allow preemption, | 1903 // Call directly. The function called cannot cause a GC, or allow preemption, |
1879 // so the return address in the link register stays correct. | 1904 // so the return address in the link register stays correct. |
1880 Call(function); | 1905 Call(function); |
1881 | 1906 |
1882 if (!csp.Is(old_stack_pointer)) { | 1907 if (!csp.Is(old_stack_pointer)) { |
1883 if (emit_debug_code()) { | 1908 if (emit_debug_code()) { |
1884 // Because the stack pointer must be aligned on a 16-byte boundary, the | 1909 // Because the stack pointer must be aligned on a 16-byte boundary, the |
1885 // aligned csp can be up to 12 bytes below the jssp. This is the case | 1910 // aligned csp can be up to 12 bytes below the jssp. This is the case |
1886 // where we only pushed one W register on top of an aligned jssp. | 1911 // where we only pushed one W register on top of an aligned jssp. |
1887 Register temp = Tmp1(); | 1912 UseScratchRegisterScope temps(this); |
| 1913 Register temp = temps.AcquireX(); |
1888 ASSERT(ActivationFrameAlignment() == 16); | 1914 ASSERT(ActivationFrameAlignment() == 16); |
1889 Sub(temp, csp, old_stack_pointer); | 1915 Sub(temp, csp, old_stack_pointer); |
1890 // We want temp <= 0 && temp >= -12. | 1916 // We want temp <= 0 && temp >= -12. |
1891 Cmp(temp, 0); | 1917 Cmp(temp, 0); |
1892 Ccmp(temp, -12, NFlag, le); | 1918 Ccmp(temp, -12, NFlag, le); |
1893 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); | 1919 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); |
1894 } | 1920 } |
1895 SetStackPointer(old_stack_pointer); | 1921 SetStackPointer(old_stack_pointer); |
1896 } | 1922 } |
1897 } | 1923 } |
1898 | 1924 |
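Note: the Cmp/Ccmp pair in the debug check above is a branchless range check; annotated:

    // Cmp(temp, 0);                  // flags for temp <= 0 (le)
    // Ccmp(temp, -12, NFlag, le);    // if le: compare temp with -12;
    //                                // otherwise force NFlag so ge fails
    // Check(ge, ...);                // passes iff -12 <= temp <= 0
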
1899 | 1925 |
1900 void MacroAssembler::Jump(Register target) { | 1926 void MacroAssembler::Jump(Register target) { |
1901 Br(target); | 1927 Br(target); |
1902 } | 1928 } |
1903 | 1929 |
1904 | 1930 |
1905 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { | 1931 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { |
1906 Mov(Tmp0(), Operand(target, rmode)); | 1932 UseScratchRegisterScope temps(this); |
1907 Br(Tmp0()); | 1933 Register temp = temps.AcquireX(); |
| 1934 Mov(temp, Operand(target, rmode)); |
| 1935 Br(temp); |
1908 } | 1936 } |
1909 | 1937 |
1910 | 1938 |
1911 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { | 1939 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { |
1912 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 1940 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
1913 Jump(reinterpret_cast<intptr_t>(target), rmode); | 1941 Jump(reinterpret_cast<intptr_t>(target), rmode); |
1914 } | 1942 } |
1915 | 1943 |
1916 | 1944 |
1917 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { | 1945 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { |
(...skipping 41 matching lines...)
1959 Label start_call; | 1987 Label start_call; |
1960 Bind(&start_call); | 1988 Bind(&start_call); |
1961 #endif | 1989 #endif |
1962 // Statement positions are expected to be recorded when the target | 1990 // Statement positions are expected to be recorded when the target |
1963 // address is loaded. | 1991 // address is loaded. |
1964 positions_recorder()->WriteRecordedPositions(); | 1992 positions_recorder()->WriteRecordedPositions(); |
1965 | 1993 |
1966 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | 1994 // Addresses always have 64 bits, so we shouldn't encounter NONE32. |
1967 ASSERT(rmode != RelocInfo::NONE32); | 1995 ASSERT(rmode != RelocInfo::NONE32); |
1968 | 1996 |
| 1997 UseScratchRegisterScope temps(this); |
| 1998 Register temp = temps.AcquireX(); |
| 1999 |
1969 if (rmode == RelocInfo::NONE64) { | 2000 if (rmode == RelocInfo::NONE64) { |
1970 uint64_t imm = reinterpret_cast<uint64_t>(target); | 2001 uint64_t imm = reinterpret_cast<uint64_t>(target); |
1971 movz(Tmp0(), (imm >> 0) & 0xffff, 0); | 2002 movz(temp, (imm >> 0) & 0xffff, 0); |
1972 movk(Tmp0(), (imm >> 16) & 0xffff, 16); | 2003 movk(temp, (imm >> 16) & 0xffff, 16); |
1973 movk(Tmp0(), (imm >> 32) & 0xffff, 32); | 2004 movk(temp, (imm >> 32) & 0xffff, 32); |
1974 movk(Tmp0(), (imm >> 48) & 0xffff, 48); | 2005 movk(temp, (imm >> 48) & 0xffff, 48); |
1975 } else { | 2006 } else { |
1976 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode)); | 2007 LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
1977 } | 2008 } |
1978 Blr(Tmp0()); | 2009 Blr(temp); |
1979 #ifdef DEBUG | 2010 #ifdef DEBUG |
1980 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); | 2011 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); |
1981 #endif | 2012 #endif |
1982 } | 2013 } |
1983 | 2014 |
1984 | 2015 |
1985 void MacroAssembler::Call(Handle<Code> code, | 2016 void MacroAssembler::Call(Handle<Code> code, |
1986 RelocInfo::Mode rmode, | 2017 RelocInfo::Mode rmode, |
1987 TypeFeedbackId ast_id) { | 2018 TypeFeedbackId ast_id) { |
1988 #ifdef DEBUG | 2019 #ifdef DEBUG |
(...skipping 60 matching lines...)
2049 | 2080 |
2050 | 2081 |
2051 | 2082 |
2052 | 2083 |
2053 | 2084 |
2054 void MacroAssembler::JumpForHeapNumber(Register object, | 2085 void MacroAssembler::JumpForHeapNumber(Register object, |
2055 Register heap_number_map, | 2086 Register heap_number_map, |
2056 Label* on_heap_number, | 2087 Label* on_heap_number, |
2057 Label* on_not_heap_number) { | 2088 Label* on_not_heap_number) { |
2058 ASSERT(on_heap_number || on_not_heap_number); | 2089 ASSERT(on_heap_number || on_not_heap_number); |
2059 // Tmp0() is used as a scratch register. | |
2060 ASSERT(!AreAliased(Tmp0(), heap_number_map)); | |
2061 AssertNotSmi(object); | 2090 AssertNotSmi(object); |
2062 | 2091 |
| 2092 UseScratchRegisterScope temps(this); |
| 2093 Register temp = temps.AcquireX(); |
| 2094 |
2063 // Load the HeapNumber map if it is not passed. | 2095 // Load the HeapNumber map if it is not passed. |
2064 if (heap_number_map.Is(NoReg)) { | 2096 if (heap_number_map.Is(NoReg)) { |
2065 heap_number_map = Tmp1(); | 2097 heap_number_map = temps.AcquireX(); |
2066 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2098 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2067 } else { | 2099 } else { |
2068 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map. | |
2069 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2100 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2070 } | 2101 } |
2071 | 2102 |
2072 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 2103 ASSERT(!AreAliased(temp, heap_number_map)); |
2073 Cmp(Tmp0(), heap_number_map); | 2104 |
| 2105 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2106 Cmp(temp, heap_number_map); |
2074 | 2107 |
2075 if (on_heap_number) { | 2108 if (on_heap_number) { |
2076 B(eq, on_heap_number); | 2109 B(eq, on_heap_number); |
2077 } | 2110 } |
2078 if (on_not_heap_number) { | 2111 if (on_not_heap_number) { |
2079 B(ne, on_not_heap_number); | 2112 B(ne, on_not_heap_number); |
2080 } | 2113 } |
2081 } | 2114 } |
2082 | 2115 |
2083 | 2116 |
(...skipping 105 matching lines...)
2189 B(on_successful_conversion, eq); | 2222 B(on_successful_conversion, eq); |
2190 } | 2223 } |
2191 if (on_failed_conversion) { | 2224 if (on_failed_conversion) { |
2192 B(on_failed_conversion, ne); | 2225 B(on_failed_conversion, ne); |
2193 } | 2226 } |
2194 } | 2227 } |
2195 | 2228 |
2196 | 2229 |
2197 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, | 2230 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, |
2198 Label* on_negative_zero) { | 2231 Label* on_negative_zero) { |
| 2232 UseScratchRegisterScope temps(this); |
| 2233 Register temp = temps.AcquireX(); |
2199 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will | 2234 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will |
2200 // cause overflow. | 2235 // cause overflow. |
2201 Fmov(Tmp0(), input); | 2236 Fmov(temp, input); |
2202 Cmp(Tmp0(), 1); | 2237 Cmp(temp, 1); |
2203 B(vs, on_negative_zero); | 2238 B(vs, on_negative_zero); |
2204 } | 2239 } |
2205 | 2240 |
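Note: the overflow (vs) trick works because -0.0 is the only double whose bit pattern is INT64_MIN, and INT64_MIN is the only value for which subtracting 1 overflows. A host-side C++ sketch of the same test:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double d) {
      int64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // like Fmov(temp, input)
      return bits == INT64_MIN;              // Cmp(temp, 1) overflows only here
    }
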
2206 | 2241 |
2207 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { | 2242 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { |
2208 // Clamp the value to [0..255]. | 2243 // Clamp the value to [0..255]. |
2209 Cmp(input.W(), Operand(input.W(), UXTB)); | 2244 Cmp(input.W(), Operand(input.W(), UXTB)); |
2210 // If input < input & 0xff, it must be < 0, so saturate to 0. | 2245 // If input < input & 0xff, it must be < 0, so saturate to 0. |
2211 Csel(output.W(), wzr, input.W(), lt); | 2246 Csel(output.W(), wzr, input.W(), lt); |
2212 // Create a constant 0xff. | 2247 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255. |
2213 Mov(WTmp0(), 255); | 2248 Csel(output.W(), output.W(), 255, le); |
2214 // If input > input & 0xff, it must be > 255, so saturate to 255. | |
2215 Csel(output.W(), WTmp0(), output.W(), gt); | |
2216 } | 2249 } |
2217 | 2250 |
2218 | 2251 |
2219 void MacroAssembler::ClampInt32ToUint8(Register in_out) { | 2252 void MacroAssembler::ClampInt32ToUint8(Register in_out) { |
2220 ClampInt32ToUint8(in_out, in_out); | 2253 ClampInt32ToUint8(in_out, in_out); |
2221 } | 2254 } |
2222 | 2255 |
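Note: the rewritten clamp stays branchless and drops the old Mov of 255 into WTmp0() (the Csel macro above materializes non-special immediates through a scratch register itself). A scalar reading of what the sequence computes:

    // Cmp(input, input & 0xff), then:
    //   input < (input & 0xff)   only when input < 0    -> select 0
    //   input > (input & 0xff)   only when input > 255  -> select 255
    //   otherwise input is already in [0, 255]          -> keep input
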
2223 | 2256 |
2224 void MacroAssembler::ClampDoubleToUint8(Register output, | 2257 void MacroAssembler::ClampDoubleToUint8(Register output, |
2225 DoubleRegister input, | 2258 DoubleRegister input, |
(...skipping 13 matching lines...)
2239 // Values greater than 255 have already been clamped to 255. | 2272 // Values greater than 255 have already been clamped to 255. |
2240 Fcvtnu(output, dbl_scratch); | 2273 Fcvtnu(output, dbl_scratch); |
2241 } | 2274 } |
2242 | 2275 |
2243 | 2276 |
2244 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, | 2277 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, |
2245 Register src, | 2278 Register src, |
2246 unsigned count, | 2279 unsigned count, |
2247 Register scratch1, | 2280 Register scratch1, |
2248 Register scratch2, | 2281 Register scratch2, |
2249 Register scratch3) { | 2282 Register scratch3, |
| 2283 Register scratch4, |
| 2284 Register scratch5) { |
2250 // Untag src and dst into scratch registers. | 2285 // Untag src and dst into scratch registers. |
2251 // Copy src->dst in a tight loop. | 2286 // Copy src->dst in a tight loop. |
2252 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1())); | 2287 ASSERT(!AreAliased(dst, src, |
| 2288 scratch1, scratch2, scratch3, scratch4, scratch5)); |
2253 ASSERT(count >= 2); | 2289 ASSERT(count >= 2); |
2254 | 2290 |
2255 const Register& remaining = scratch3; | 2291 const Register& remaining = scratch3; |
2256 Mov(remaining, count / 2); | 2292 Mov(remaining, count / 2); |
2257 | 2293 |
2258 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
2259 InstructionAccurateScope scope(this); | |
2260 | |
2261 const Register& dst_untagged = scratch1; | 2294 const Register& dst_untagged = scratch1; |
2262 const Register& src_untagged = scratch2; | 2295 const Register& src_untagged = scratch2; |
2263 sub(dst_untagged, dst, kHeapObjectTag); | 2296 Sub(dst_untagged, dst, kHeapObjectTag); |
2264 sub(src_untagged, src, kHeapObjectTag); | 2297 Sub(src_untagged, src, kHeapObjectTag); |
2265 | 2298 |
2266 // Copy fields in pairs. | 2299 // Copy fields in pairs. |
2267 Label loop; | 2300 Label loop; |
2268 bind(&loop); | 2301 Bind(&loop); |
2269 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2302 Ldp(scratch4, scratch5, |
2270 PostIndex)); | 2303 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); |
2271 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | 2304 Stp(scratch4, scratch5, |
2272 PostIndex)); | 2305 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); |
2273 sub(remaining, remaining, 1); | 2306 Sub(remaining, remaining, 1); |
2274 cbnz(remaining, &loop); | 2307 Cbnz(remaining, &loop); |
2275 | 2308 |
2276 // Handle the leftovers. | 2309 // Handle the leftovers. |
2277 if (count & 1) { | 2310 if (count & 1) { |
2278 ldr(Tmp0(), MemOperand(src_untagged)); | 2311 Ldr(scratch4, MemOperand(src_untagged)); |
2279 str(Tmp0(), MemOperand(dst_untagged)); | 2312 Str(scratch4, MemOperand(dst_untagged)); |
2280 } | 2313 } |
2281 } | 2314 } |
2282 | 2315 |
2283 | 2316 |
2284 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | 2317 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, |
2285 Register src, | 2318 Register src, |
2286 unsigned count, | 2319 unsigned count, |
2287 Register scratch1, | 2320 Register scratch1, |
2288 Register scratch2) { | 2321 Register scratch2, |
| 2322 Register scratch3, |
| 2323 Register scratch4) { |
2289 // Untag src and dst into scratch registers. | 2324 // Untag src and dst into scratch registers. |
2290 // Copy src->dst in an unrolled loop. | 2325 // Copy src->dst in an unrolled loop. |
2291 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1())); | 2326 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); |
2292 | |
2293 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
2294 InstructionAccurateScope scope(this); | |
2295 | 2327 |
2296 const Register& dst_untagged = scratch1; | 2328 const Register& dst_untagged = scratch1; |
2297 const Register& src_untagged = scratch2; | 2329 const Register& src_untagged = scratch2; |
2298 sub(dst_untagged, dst, kHeapObjectTag); | 2330 sub(dst_untagged, dst, kHeapObjectTag); |
2299 sub(src_untagged, src, kHeapObjectTag); | 2331 sub(src_untagged, src, kHeapObjectTag); |
2300 | 2332 |
2301 // Copy fields in pairs. | 2333 // Copy fields in pairs. |
2302 for (unsigned i = 0; i < count / 2; i++) { | 2334 for (unsigned i = 0; i < count / 2; i++) { |
2303 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2, | 2335 Ldp(scratch3, scratch4, |
2304 PostIndex)); | 2336 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); |
2305 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2, | 2337 Stp(scratch3, scratch4, |
2306 PostIndex)); | 2338 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); |
2307 } | 2339 } |
2308 | 2340 |
2309 // Handle the leftovers. | 2341 // Handle the leftovers. |
2310 if (count & 1) { | 2342 if (count & 1) { |
2311 ldr(Tmp0(), MemOperand(src_untagged)); | 2343 Ldr(scratch3, MemOperand(src_untagged)); |
2312 str(Tmp0(), MemOperand(dst_untagged)); | 2344 Str(scratch3, MemOperand(dst_untagged)); |
2313 } | 2345 } |
2314 } | 2346 } |
2315 | 2347 |
2316 | 2348 |
2317 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | 2349 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, |
2318 Register src, | 2350 Register src, |
2319 unsigned count, | 2351 unsigned count, |
2320 Register scratch1) { | 2352 Register scratch1, |
| 2353 Register scratch2, |
| 2354 Register scratch3) { |
2321 // Untag src and dst into scratch registers. | 2355 // Untag src and dst into scratch registers. |
2322 // Copy src->dst in an unrolled loop. | 2356 // Copy src->dst in an unrolled loop. |
2323 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1())); | 2357 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); |
2324 | |
2325 // Only use the Assembler, so we can use Tmp0() and Tmp1(). | |
2326 InstructionAccurateScope scope(this); | |
2327 | 2358 |
2328 const Register& dst_untagged = scratch1; | 2359 const Register& dst_untagged = scratch1; |
2329 const Register& src_untagged = Tmp1(); | 2360 const Register& src_untagged = scratch2; |
2330 sub(dst_untagged, dst, kHeapObjectTag); | 2361 Sub(dst_untagged, dst, kHeapObjectTag); |
2331 sub(src_untagged, src, kHeapObjectTag); | 2362 Sub(src_untagged, src, kHeapObjectTag); |
2332 | 2363 |
2333 // Copy fields one by one. | 2364 // Copy fields one by one. |
2334 for (unsigned i = 0; i < count; i++) { | 2365 for (unsigned i = 0; i < count; i++) { |
2335 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); | 2366 Ldr(scratch3, MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); |
2336 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); | 2367 Str(scratch3, MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); |
2337 } | 2368 } |
2338 } | 2369 } |
2339 | 2370 |
2340 | 2371 |
2341 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | 2372 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, |
2342 unsigned count) { | 2373 unsigned count) { |
2343 // One of two methods is used: | 2374 // One of two methods is used: |
2344 // | 2375 // |
2345 // For high 'count' values where many scratch registers are available: | 2376 // For high 'count' values where many scratch registers are available: |
2346 // Untag src and dst into scratch registers. | 2377 // Untag src and dst into scratch registers. |
2347 // Copy src->dst in a tight loop. | 2378 // Copy src->dst in a tight loop. |
2348 // | 2379 // |
2349 // For low 'count' values or where few scratch registers are available: | 2380 // For low 'count' values or where few scratch registers are available: |
2350 // Untag src and dst into scratch registers. | 2381 // Untag src and dst into scratch registers. |
2351 // Copy src->dst in an unrolled loop. | 2382 // Copy src->dst in an unrolled loop. |
2352 // | 2383 // |
2353 // In both cases, fields are copied in pairs if possible, and left-overs are | 2384 // In both cases, fields are copied in pairs if possible, and left-overs are |
2354 // handled separately. | 2385 // handled separately. |
| 2386 ASSERT(!AreAliased(dst, src)); |
2355 ASSERT(!temps.IncludesAliasOf(dst)); | 2387 ASSERT(!temps.IncludesAliasOf(dst)); |
2356 ASSERT(!temps.IncludesAliasOf(src)); | 2388 ASSERT(!temps.IncludesAliasOf(src)); |
2357 ASSERT(!temps.IncludesAliasOf(Tmp0())); | |
2358 ASSERT(!temps.IncludesAliasOf(Tmp1())); | |
2359 ASSERT(!temps.IncludesAliasOf(xzr)); | 2389 ASSERT(!temps.IncludesAliasOf(xzr)); |
2360 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1())); | |
2361 | 2390 |
2362 if (emit_debug_code()) { | 2391 if (emit_debug_code()) { |
2363 Cmp(dst, src); | 2392 Cmp(dst, src); |
2364 Check(ne, kTheSourceAndDestinationAreTheSame); | 2393 Check(ne, kTheSourceAndDestinationAreTheSame); |
2365 } | 2394 } |
2366 | 2395 |
2367 // The value of 'count' at which a loop will be generated (if there are | 2396 // The value of 'count' at which a loop will be generated (if there are |
2368 // enough scratch registers). | 2397 // enough scratch registers). |
2369 static const unsigned kLoopThreshold = 8; | 2398 static const unsigned kLoopThreshold = 8; |
2370 | 2399 |
2371 ASSERT(!temps.IsEmpty()); | 2400 UseScratchRegisterScope masm_temps(this); |
2372 Register scratch1 = Register(temps.PopLowestIndex()); | 2401 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) { |
2373 Register scratch2 = Register(temps.PopLowestIndex()); | 2402 CopyFieldsLoopPairsHelper(dst, src, count, |
2374 Register scratch3 = Register(temps.PopLowestIndex()); | 2403 Register(temps.PopLowestIndex()), |
2375 | 2404 Register(temps.PopLowestIndex()), |
2376 if (scratch3.IsValid() && (count >= kLoopThreshold)) { | 2405 Register(temps.PopLowestIndex()), |
2377 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3); | 2406 masm_temps.AcquireX(), |
2378 } else if (scratch2.IsValid()) { | 2407 masm_temps.AcquireX()); |
2379 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2); | 2408 } else if (temps.Count() >= 2) { |
2380 } else if (scratch1.IsValid()) { | 2409 CopyFieldsUnrolledPairsHelper(dst, src, count, |
2381 CopyFieldsUnrolledHelper(dst, src, count, scratch1); | 2410 Register(temps.PopLowestIndex()), |
| 2411 Register(temps.PopLowestIndex()), |
| 2412 masm_temps.AcquireX(), |
| 2413 masm_temps.AcquireX()); |
| 2414 } else if (temps.Count() == 1) { |
| 2415 CopyFieldsUnrolledHelper(dst, src, count, |
| 2416 Register(temps.PopLowestIndex()), |
| 2417 masm_temps.AcquireX(), |
| 2418 masm_temps.AcquireX()); |
2382 } else { | 2419 } else { |
2383 UNREACHABLE(); | 2420 UNREACHABLE(); |
2384 } | 2421 } |
2385 } | 2422 } |
2386 | 2423 |
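Note: the CopyFields dispatch, per the checks above (kLoopThreshold == 8):

    //   temps.Count() >= 3 && count >= 8  ->  CopyFieldsLoopPairsHelper
    //   temps.Count() >= 2                ->  CopyFieldsUnrolledPairsHelper
    //   temps.Count() == 1                ->  CopyFieldsUnrolledHelper
    // Each helper now also borrows two AcquireX() scratch registers from
    // the masm scope instead of using Tmp0()/Tmp1() directly.
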
2387 | 2424 |
2388 void MacroAssembler::CopyBytes(Register dst, | 2425 void MacroAssembler::CopyBytes(Register dst, |
2389 Register src, | 2426 Register src, |
2390 Register length, | 2427 Register length, |
2391 Register scratch, | 2428 Register scratch, |
(...skipping 409 matching lines...)
2801 | 2838 |
2802 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | 2839 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: |
2803 // https://code.google.com/p/v8/issues/detail?id=3149 | 2840 // https://code.google.com/p/v8/issues/detail?id=3149 |
2804 Sxtw(result, result.W()); | 2841 Sxtw(result, result.W()); |
2805 } | 2842 } |
2806 | 2843 |
2807 | 2844 |
2808 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | 2845 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { |
2809 if (frame_mode == BUILD_STUB_FRAME) { | 2846 if (frame_mode == BUILD_STUB_FRAME) { |
2810 ASSERT(StackPointer().Is(jssp)); | 2847 ASSERT(StackPointer().Is(jssp)); |
| 2848 UseScratchRegisterScope temps(this); |
| 2849 Register temp = temps.AcquireX(); |
2811 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already | 2850 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already |
2812 // have the special STUB smi? | 2851 // have the special STUB smi? |
2813 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); | 2852 __ Mov(temp, Operand(Smi::FromInt(StackFrame::STUB))); |
2814 // Compiled stubs don't age, and so they don't need the predictable code | 2853 // Compiled stubs don't age, and so they don't need the predictable code |
2815 // ageing sequence. | 2854 // ageing sequence. |
2816 __ Push(lr, fp, cp, Tmp0()); | 2855 __ Push(lr, fp, cp, temp); |
2817 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 2856 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
2818 } else { | 2857 } else { |
2819 if (isolate()->IsCodePreAgingActive()) { | 2858 if (isolate()->IsCodePreAgingActive()) { |
2820 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 2859 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
2821 __ EmitCodeAgeSequence(stub); | 2860 __ EmitCodeAgeSequence(stub); |
2822 } else { | 2861 } else { |
2823 __ EmitFrameSetupForCodeAgePatching(); | 2862 __ EmitFrameSetupForCodeAgePatching(); |
2824 } | 2863 } |
2825 } | 2864 } |
2826 } | 2865 } |
2827 | 2866 |
2828 | 2867 |
2829 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 2868 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
2830 ASSERT(jssp.Is(StackPointer())); | 2869 ASSERT(jssp.Is(StackPointer())); |
| 2870 UseScratchRegisterScope temps(this); |
| 2871 Register type_reg = temps.AcquireX(); |
| 2872 Register code_reg = temps.AcquireX(); |
| 2873 |
2831 Push(lr, fp, cp); | 2874 Push(lr, fp, cp); |
2832 Mov(Tmp1(), Operand(Smi::FromInt(type))); | 2875 Mov(type_reg, Operand(Smi::FromInt(type))); |
2833 Mov(Tmp0(), Operand(CodeObject())); | 2876 Mov(code_reg, Operand(CodeObject())); |
2834 Push(Tmp1(), Tmp0()); | 2877 Push(type_reg, code_reg); |
2835 // jssp[4] : lr | 2878 // jssp[4] : lr |
2836 // jssp[3] : fp | 2879 // jssp[3] : fp |
2837 // jssp[2] : cp | 2880 // jssp[2] : cp |
2838 // jssp[1] : type | 2881 // jssp[1] : type |
2839 // jssp[0] : code object | 2882 // jssp[0] : code object |
2840 | 2883 |
2841 // Adjust FP to point to saved FP. | 2884 // Adjust FP to point to saved FP. |
2842 add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); | 2885 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); |
2843 } | 2886 } |
2844 | 2887 |
2845 | 2888 |
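As a quick check on the Add() that computes fp above: after the two pushes, jssp points at the code-object slot and the saved fp sits three slots higher. Assuming kPointerSize == 8 and StandardFrameConstants::kFixedFrameSizeFromFp == 2 * kPointerSize, the operand is 16 + 8 = 24, so fp = jssp + 24, which is exactly the 'jssp[3] : fp' slot in the layout comment.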
2846 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 2889 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
2847 ASSERT(jssp.Is(StackPointer())); | 2890 ASSERT(jssp.Is(StackPointer())); |
2848 // Drop the execution stack down to the frame pointer and restore | 2891 // Drop the execution stack down to the frame pointer and restore |
2849 // the caller frame pointer and return address. | 2892 // the caller frame pointer and return address. |
2850 Mov(jssp, fp); | 2893 Mov(jssp, fp); |
2851 AssertStackConsistency(); | 2894 AssertStackConsistency(); |
2852 Pop(fp, lr); | 2895 Pop(fp, lr); |
(...skipping 245 matching lines...)
3098 // Trash the registers to simulate an allocation failure. | 3141 // Trash the registers to simulate an allocation failure. |
3099 // We apply salt to the original zap value to easily spot the values. | 3142 // We apply salt to the original zap value to easily spot the values. |
3100 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3143 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
3101 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3144 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
3102 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3145 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
3103 } | 3146 } |
3104 B(gc_required); | 3147 B(gc_required); |
3105 return; | 3148 return; |
3106 } | 3149 } |
3107 | 3150 |
3108 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1())); | 3151 UseScratchRegisterScope temps(this); |
3109 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() && | 3152 Register scratch3 = temps.AcquireX(); |
3110 Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3153 |
| 3154 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); |
| 3155 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); |
3111 | 3156 |
3112 // Make object size into bytes. | 3157 // Make object size into bytes. |
3113 if ((flags & SIZE_IN_WORDS) != 0) { | 3158 if ((flags & SIZE_IN_WORDS) != 0) { |
3114 object_size *= kPointerSize; | 3159 object_size *= kPointerSize; |
3115 } | 3160 } |
3116 ASSERT(0 == (object_size & kObjectAlignmentMask)); | 3161 ASSERT(0 == (object_size & kObjectAlignmentMask)); |
3117 | 3162 |
3118 // Check relative positions of allocation top and limit addresses. | 3163 // Check relative positions of allocation top and limit addresses. |
3119 // The values must be adjacent in memory to allow the use of LDP. | 3164 // The values must be adjacent in memory to allow the use of LDP. |
3120 ExternalReference heap_allocation_top = | 3165 ExternalReference heap_allocation_top = |
3121 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3166 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
3122 ExternalReference heap_allocation_limit = | 3167 ExternalReference heap_allocation_limit = |
3123 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3168 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
3124 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3169 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
3125 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3170 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
3126 ASSERT((limit - top) == kPointerSize); | 3171 ASSERT((limit - top) == kPointerSize); |
3127 | 3172 |
3128 // Set up allocation top address and object size registers. | 3173 // Set up allocation top address and object size registers. |
3129 Register top_address = scratch1; | 3174 Register top_address = scratch1; |
3130 Register allocation_limit = scratch2; | 3175 Register allocation_limit = scratch2; |
3131 Mov(top_address, Operand(heap_allocation_top)); | 3176 Mov(top_address, Operand(heap_allocation_top)); |
3132 | 3177 |
3133 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3178 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
3134 // Load allocation top into result and the allocation limit. | 3179 // Load allocation top into result and the allocation limit. |
3135 Ldp(result, allocation_limit, MemOperand(top_address)); | 3180 Ldp(result, allocation_limit, MemOperand(top_address)); |
3136 } else { | 3181 } else { |
3137 if (emit_debug_code()) { | 3182 if (emit_debug_code()) { |
3138 // Assert that result actually contains top on entry. | 3183 // Assert that result actually contains top on entry. |
3139 Ldr(Tmp0(), MemOperand(top_address)); | 3184 Ldr(scratch3, MemOperand(top_address)); |
3140 Cmp(result, Tmp0()); | 3185 Cmp(result, scratch3); |
3141 Check(eq, kUnexpectedAllocationTop); | 3186 Check(eq, kUnexpectedAllocationTop); |
3142 } | 3187 } |
3143 // Load the allocation limit. 'result' already contains the allocation top. | 3188 // Load the allocation limit. 'result' already contains the allocation top. |
3144 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3189 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
3145 } | 3190 } |
3146 | 3191 |
3147 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3192 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
3148 // the same alignment on A64. | 3193 // the same alignment on A64. |
3149 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3194 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
3150 | 3195 |
3151 // Calculate new top and bail out if new space is exhausted. | 3196 // Calculate new top and bail out if new space is exhausted. |
3152 Adds(Tmp1(), result, object_size); | 3197 Adds(scratch3, result, object_size); |
3153 B(vs, gc_required); | 3198 B(vs, gc_required); |
3154 Cmp(Tmp1(), allocation_limit); | 3199 Cmp(scratch3, allocation_limit); |
3155 B(hi, gc_required); | 3200 B(hi, gc_required); |
3156 Str(Tmp1(), MemOperand(top_address)); | 3201 Str(scratch3, MemOperand(top_address)); |
3157 | 3202 |
3158 // Tag the object if requested. | 3203 // Tag the object if requested. |
3159 if ((flags & TAG_OBJECT) != 0) { | 3204 if ((flags & TAG_OBJECT) != 0) { |
3160 Orr(result, result, kHeapObjectTag); | 3205 Orr(result, result, kHeapObjectTag); |
3161 } | 3206 } |
3162 } | 3207 } |
3163 | 3208 |
3164 | 3209 |
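A minimal C++ sketch of the fast path the fixed-size Allocate() above emits; the function name and out-parameter are invented for illustration, and the two early returns correspond to the B(vs) and B(hi) branches to gc_required:

    #include <cstdint>

    // top_address points at two adjacent heap words: [0] allocation top,
    // [1] allocation limit. Their adjacency is what permits the single LDP.
    bool TryAllocate(uint64_t* top_address, uint64_t object_size,
                     uint64_t* result) {
      uint64_t top = top_address[0];         // Ldp result, limit, [top]
      uint64_t limit = top_address[1];
      uint64_t new_top = top + object_size;  // Adds scratch3, result, size
      if (new_top < top) return false;       // B(vs, gc_required): overflow
      if (new_top > limit) return false;     // Cmp + B(hi, gc_required)
      top_address[0] = new_top;              // Str scratch3, [top_address]
      *result = top;                         // caller Orr's in kHeapObjectTag
      return true;
    }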
3165 void MacroAssembler::Allocate(Register object_size, | 3210 void MacroAssembler::Allocate(Register object_size, |
3166 Register result, | 3211 Register result, |
3167 Register scratch1, | 3212 Register scratch1, |
3168 Register scratch2, | 3213 Register scratch2, |
3169 Label* gc_required, | 3214 Label* gc_required, |
3170 AllocationFlags flags) { | 3215 AllocationFlags flags) { |
3171 if (!FLAG_inline_new) { | 3216 if (!FLAG_inline_new) { |
3172 if (emit_debug_code()) { | 3217 if (emit_debug_code()) { |
3173 // Trash the registers to simulate an allocation failure. | 3218 // Trash the registers to simulate an allocation failure. |
3174 // We apply salt to the original zap value to easily spot the values. | 3219 // We apply salt to the original zap value to easily spot the values. |
3175 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | 3220 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); |
3176 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | 3221 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); |
3177 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | 3222 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); |
3178 } | 3223 } |
3179 B(gc_required); | 3224 B(gc_required); |
3180 return; | 3225 return; |
3181 } | 3226 } |
3182 | 3227 |
3183 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1())); | 3228 UseScratchRegisterScope temps(this); |
3184 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() && | 3229 Register scratch3 = temps.AcquireX(); |
3185 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits()); | 3230 |
| 3231 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); |
| 3232 ASSERT(object_size.Is64Bits() && result.Is64Bits() && |
| 3233 scratch1.Is64Bits() && scratch2.Is64Bits()); |
3186 | 3234 |
3187 // Check relative positions of allocation top and limit addresses. | 3235 // Check relative positions of allocation top and limit addresses. |
3188 // The values must be adjacent in memory to allow the use of LDP. | 3236 // The values must be adjacent in memory to allow the use of LDP. |
3189 ExternalReference heap_allocation_top = | 3237 ExternalReference heap_allocation_top = |
3190 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 3238 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
3191 ExternalReference heap_allocation_limit = | 3239 ExternalReference heap_allocation_limit = |
3192 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 3240 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
3193 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | 3241 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); |
3194 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | 3242 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); |
3195 ASSERT((limit - top) == kPointerSize); | 3243 ASSERT((limit - top) == kPointerSize); |
3196 | 3244 |
3197 // Set up allocation top address and object size registers. | 3245 // Set up allocation top address and object size registers. |
3198 Register top_address = scratch1; | 3246 Register top_address = scratch1; |
3199 Register allocation_limit = scratch2; | 3247 Register allocation_limit = scratch2; |
3200 Mov(top_address, Operand(heap_allocation_top)); | 3248 Mov(top_address, Operand(heap_allocation_top)); |
3201 | 3249 |
3202 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 3250 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
3203 // Load allocation top into result and the allocation limit. | 3251 // Load allocation top into result and the allocation limit. |
3204 Ldp(result, allocation_limit, MemOperand(top_address)); | 3252 Ldp(result, allocation_limit, MemOperand(top_address)); |
3205 } else { | 3253 } else { |
3206 if (emit_debug_code()) { | 3254 if (emit_debug_code()) { |
3207 // Assert that result actually contains top on entry. | 3255 // Assert that result actually contains top on entry. |
3208 Ldr(Tmp0(), MemOperand(top_address)); | 3256 Ldr(scratch3, MemOperand(top_address)); |
3209 Cmp(result, Tmp0()); | 3257 Cmp(result, scratch3); |
3210 Check(eq, kUnexpectedAllocationTop); | 3258 Check(eq, kUnexpectedAllocationTop); |
3211 } | 3259 } |
3212 // Load the allocation limit. 'result' already contains the allocation top. | 3260 // Load the allocation limit. 'result' already contains the allocation top. |
3213 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3261 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
3214 } | 3262 } |
3215 | 3263 |
3216 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3264 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
3217 // the same alignment on A64. | 3265 // the same alignment on A64. |
3218 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3266 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
3219 | 3267 |
3220 // Calculate new top and bail out if new space is exhausted. | 3268 // Calculate new top and bail out if new space is exhausted. |
3221 if ((flags & SIZE_IN_WORDS) != 0) { | 3269 if ((flags & SIZE_IN_WORDS) != 0) { |
3222 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2)); | 3270 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); |
3223 } else { | 3271 } else { |
3224 Adds(Tmp1(), result, object_size); | 3272 Adds(scratch3, result, object_size); |
3225 } | 3273 } |
3226 | 3274 |
3227 if (emit_debug_code()) { | 3275 if (emit_debug_code()) { |
3228 Tst(Tmp1(), kObjectAlignmentMask); | 3276 Tst(scratch3, kObjectAlignmentMask); |
3229 Check(eq, kUnalignedAllocationInNewSpace); | 3277 Check(eq, kUnalignedAllocationInNewSpace); |
3230 } | 3278 } |
3231 | 3279 |
3232 B(vs, gc_required); | 3280 B(vs, gc_required); |
3233 Cmp(Tmp1(), allocation_limit); | 3281 Cmp(scratch3, allocation_limit); |
3234 B(hi, gc_required); | 3282 B(hi, gc_required); |
3235 Str(Tmp1(), MemOperand(top_address)); | 3283 Str(scratch3, MemOperand(top_address)); |
3236 | 3284 |
3237 // Tag the object if requested. | 3285 // Tag the object if requested. |
3238 if ((flags & TAG_OBJECT) != 0) { | 3286 if ((flags & TAG_OBJECT) != 0) { |
3239 Orr(result, result, kHeapObjectTag); | 3287 Orr(result, result, kHeapObjectTag); |
3240 } | 3288 } |
3241 } | 3289 } |
3242 | 3290 |
3243 | 3291 |
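The register-sized variant differs from the fixed-size one above only in how the size reaches the Adds: with SIZE_IN_WORDS the size register holds words, so the operand is shifted left by kPointerSizeLog2. Assuming kPointerSizeLog2 == 3, an object_size of 4 words contributes 4 << 3 = 32 bytes to the new top.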
3244 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 3292 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
3245 Register scratch) { | 3293 Register scratch) { |
(...skipping 307 matching lines...)
3553 } | 3601 } |
3554 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 3602 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
3555 Cmp(scratch, Operand(map)); | 3603 Cmp(scratch, Operand(map)); |
3556 B(ne, &fail); | 3604 B(ne, &fail); |
3557 Jump(success, RelocInfo::CODE_TARGET); | 3605 Jump(success, RelocInfo::CODE_TARGET); |
3558 Bind(&fail); | 3606 Bind(&fail); |
3559 } | 3607 } |
3560 | 3608 |
3561 | 3609 |
3562 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { | 3610 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { |
3563 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 3611 UseScratchRegisterScope temps(this); |
3564 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset)); | 3612 Register temp = temps.AcquireX(); |
3565 Tst(Tmp0(), mask); | 3613 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3614 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
| 3615 Tst(temp, mask); |
3566 } | 3616 } |
3567 | 3617 |
3568 | 3618 |
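A hypothetical use of TestMapBitfield(); the mask shown is illustrative and assumes Map::kIsUndetectable names a bit position in the map's bit field:

    // Branch if the map of the object in x0 has the undetectable bit set.
    __ TestMapBitfield(x0, 1 << Map::kIsUndetectable);
    __ B(ne, &undetectable);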
3569 void MacroAssembler::LoadElementsKind(Register result, Register object) { | 3619 void MacroAssembler::LoadElementsKind(Register result, Register object) { |
3570 // Load map. | 3620 // Load map. |
3571 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 3621 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
3572 // Load the map's "bit field 2". | 3622 // Load the map's "bit field 2". |
3573 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); | 3623 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset)); |
3574 // Retrieve elements_kind from bit field 2. | 3624 // Retrieve elements_kind from bit field 2. |
3575 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | 3625 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); |
(...skipping 51 matching lines...)
3627 Bind(&non_instance); | 3677 Bind(&non_instance); |
3628 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 3678 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
3629 | 3679 |
3630 // All done. | 3680 // All done. |
3631 Bind(&done); | 3681 Bind(&done); |
3632 } | 3682 } |
3633 | 3683 |
3634 | 3684 |
3635 void MacroAssembler::CompareRoot(const Register& obj, | 3685 void MacroAssembler::CompareRoot(const Register& obj, |
3636 Heap::RootListIndex index) { | 3686 Heap::RootListIndex index) { |
3637 ASSERT(!AreAliased(obj, Tmp0())); | 3687 UseScratchRegisterScope temps(this); |
3638 LoadRoot(Tmp0(), index); | 3688 Register temp = temps.AcquireX(); |
3639 Cmp(obj, Tmp0()); | 3689 ASSERT(!AreAliased(obj, temp)); |
| 3690 LoadRoot(temp, index); |
| 3691 Cmp(obj, temp); |
3640 } | 3692 } |
3641 | 3693 |
3642 | 3694 |
3643 void MacroAssembler::JumpIfRoot(const Register& obj, | 3695 void MacroAssembler::JumpIfRoot(const Register& obj, |
3644 Heap::RootListIndex index, | 3696 Heap::RootListIndex index, |
3645 Label* if_equal) { | 3697 Label* if_equal) { |
3646 CompareRoot(obj, index); | 3698 CompareRoot(obj, index); |
3647 B(eq, if_equal); | 3699 B(eq, if_equal); |
3648 } | 3700 } |
3649 | 3701 |
(...skipping 176 matching lines...)
3826 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); | 3878 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); |
3827 Check(lt, kIndexIsTooLarge); | 3879 Check(lt, kIndexIsTooLarge); |
3828 | 3880 |
3829 ASSERT_EQ(0, Smi::FromInt(0)); | 3881 ASSERT_EQ(0, Smi::FromInt(0)); |
3830 Cmp(index, 0); | 3882 Cmp(index, 0); |
3831 Check(ge, kIndexIsNegative); | 3883 Check(ge, kIndexIsNegative); |
3832 } | 3884 } |
3833 | 3885 |
3834 | 3886 |
3835 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 3887 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
3836 Register scratch, | 3888 Register scratch1, |
| 3889 Register scratch2, |
3837 Label* miss) { | 3890 Label* miss) { |
3838 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function. | 3891 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
3839 // The ARM version takes two scratch registers, and that should be enough for | |
3840 // all of the checks. | |
3841 | |
3842 Label same_contexts; | 3892 Label same_contexts; |
3843 | 3893 |
3844 ASSERT(!AreAliased(holder_reg, scratch)); | |
3845 | |
3846 // Load current lexical context from the stack frame. | 3894 // Load current lexical context from the stack frame. |
3847 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3895 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
3848 // In debug mode, make sure the lexical context is set. | 3896 // In debug mode, make sure the lexical context is set. |
3849 #ifdef DEBUG | 3897 #ifdef DEBUG |
3850 Cmp(scratch, 0); | 3898 Cmp(scratch1, 0); |
3851 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 3899 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
3852 #endif | 3900 #endif |
3853 | 3901 |
3854 // Load the native context of the current context. | 3902 // Load the native context of the current context. |
3855 int offset = | 3903 int offset = |
3856 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | 3904 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
3857 Ldr(scratch, FieldMemOperand(scratch, offset)); | 3905 Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
3858 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 3906 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
3859 | 3907 |
3860 // Check the context is a native context. | 3908 // Check the context is a native context. |
3861 if (emit_debug_code()) { | 3909 if (emit_debug_code()) { |
3862 // Read the first word and compare to the native_context_map. | 3910 // Read the first word and compare to the native_context_map. |
3863 Register temp = Tmp1(); | 3911 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset)); |
3864 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 3912 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex); |
3865 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | |
3866 Check(eq, kExpectedNativeContext); | 3913 Check(eq, kExpectedNativeContext); |
3867 } | 3914 } |
3868 | 3915 |
3869 // Check if both contexts are the same. | 3916 // Check if both contexts are the same. |
3870 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 3917 Ldr(scratch2, FieldMemOperand(holder_reg, |
3871 cmp(scratch, Tmp0()); | 3918 JSGlobalProxy::kNativeContextOffset)); |
3872 b(&same_contexts, eq); | 3919 Cmp(scratch1, scratch2); |
| 3920 B(&same_contexts, eq); |
3873 | 3921 |
3874 // Check the context is a native context. | 3922 // Check the context is a native context. |
3875 if (emit_debug_code()) { | 3923 if (emit_debug_code()) { |
3876 // Move Tmp0() into a different register, as CompareRoot will use it. | 3924 // We're short on scratch registers here, so use holder_reg as a scratch. |
3877 Register temp = Tmp1(); | 3925 Push(holder_reg); |
3878 mov(temp, Tmp0()); | 3926 Register scratch3 = holder_reg; |
3879 CompareRoot(temp, Heap::kNullValueRootIndex); | 3927 |
| 3928 CompareRoot(scratch2, Heap::kNullValueRootIndex); |
3880 Check(ne, kExpectedNonNullContext); | 3929 Check(ne, kExpectedNonNullContext); |
3881 | 3930 |
3882 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset)); | 3931 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); |
3883 CompareRoot(temp, Heap::kNativeContextMapRootIndex); | 3932 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex); |
3884 Check(eq, kExpectedNativeContext); | 3933 Check(eq, kExpectedNativeContext); |
3885 | 3934 Pop(holder_reg); |
3886 // Let's consider that Tmp0() has been clobbered by the MacroAssembler. |
3887 // We reload it with its value. | |
3888 ldr(Tmp0(), FieldMemOperand(holder_reg, | |
3889 JSGlobalProxy::kNativeContextOffset)); | |
3890 } | 3935 } |
3891 | 3936 |
3892 // Check that the security token in the calling global object is | 3937 // Check that the security token in the calling global object is |
3893 // compatible with the security token in the receiving global | 3938 // compatible with the security token in the receiving global |
3894 // object. | 3939 // object. |
3895 int token_offset = Context::kHeaderSize + | 3940 int token_offset = Context::kHeaderSize + |
3896 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 3941 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
3897 | 3942 |
3898 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 3943 Ldr(scratch1, FieldMemOperand(scratch1, token_offset)); |
3899 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset)); | 3944 Ldr(scratch2, FieldMemOperand(scratch2, token_offset)); |
3900 cmp(scratch, Tmp0()); | 3945 Cmp(scratch1, scratch2); |
3901 b(miss, ne); | 3946 B(miss, ne); |
3902 | 3947 |
3903 bind(&same_contexts); | 3948 Bind(&same_contexts); |
3904 } | 3949 } |
3905 | 3950 |
3906 | 3951 |
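The Push(holder_reg)/Pop(holder_reg) pair in the debug block above is the standard way to conjure an extra scratch register once the scope is exhausted. The shape of the pattern, with placeholder names:

    __ Push(live_reg);             // preserve the live value
    Register scratch = live_reg;   // borrow the register
    // ... use scratch; nothing here may read the original value ...
    __ Pop(live_reg);              // restore before the value is read again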
3907 // Compute the hash code from the untagged key. This must be kept in sync with | 3952 // Compute the hash code from the untagged key. This must be kept in sync with |
3908 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | 3953 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in |
3909 // code-stub-hydrogen.cc | 3954 // code-stub-hydrogen.cc |
3910 void MacroAssembler::GetNumberHash(Register key, Register scratch) { | 3955 void MacroAssembler::GetNumberHash(Register key, Register scratch) { |
3911 ASSERT(!AreAliased(key, scratch)); | 3956 ASSERT(!AreAliased(key, scratch)); |
3912 | 3957 |
3913 // Xor original key with a seed. | 3958 // Xor original key with a seed. |
(...skipping 82 matching lines...)
3996 | 4041 |
3997 // Get the value at the masked, scaled index and return. | 4042 // Get the value at the masked, scaled index and return. |
3998 const int kValueOffset = | 4043 const int kValueOffset = |
3999 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 4044 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
4000 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); | 4045 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); |
4001 } | 4046 } |
4002 | 4047 |
4003 | 4048 |
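GetNumberHash() above has to stay in sync with the C++ ComputeIntegerHash in utils.h. For reference, a sketch of that hash written from memory (verify the constants against utils.h before relying on them):

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash & 0x3fffffff;   // fit the result in a smi
    }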
4004 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 4049 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
4005 Register address, | 4050 Register address, |
4006 Register scratch, | 4051 Register scratch1, |
4007 SaveFPRegsMode fp_mode, | 4052 SaveFPRegsMode fp_mode, |
4008 RememberedSetFinalAction and_then) { | 4053 RememberedSetFinalAction and_then) { |
4009 ASSERT(!AreAliased(object, address, scratch)); | 4054 ASSERT(!AreAliased(object, address, scratch1)); |
4010 Label done, store_buffer_overflow; | 4055 Label done, store_buffer_overflow; |
4011 if (emit_debug_code()) { | 4056 if (emit_debug_code()) { |
4012 Label ok; | 4057 Label ok; |
4013 JumpIfNotInNewSpace(object, &ok); | 4058 JumpIfNotInNewSpace(object, &ok); |
4014 Abort(kRememberedSetPointerInNewSpace); | 4059 Abort(kRememberedSetPointerInNewSpace); |
4015 bind(&ok); | 4060 bind(&ok); |
4016 } | 4061 } |
| 4062 UseScratchRegisterScope temps(this); |
| 4063 Register scratch2 = temps.AcquireX(); |
| 4064 |
4017 // Load store buffer top. | 4065 // Load store buffer top. |
4018 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate()))); | 4066 Mov(scratch2, Operand(ExternalReference::store_buffer_top(isolate()))); |
4019 Ldr(scratch, MemOperand(Tmp0())); | 4067 Ldr(scratch1, MemOperand(scratch2)); |
4020 // Store pointer to buffer and increment buffer top. | 4068 // Store pointer to buffer and increment buffer top. |
4021 Str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 4069 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); |
4022 // Write back new top of buffer. | 4070 // Write back new top of buffer. |
4023 Str(scratch, MemOperand(Tmp0())); | 4071 Str(scratch1, MemOperand(scratch2)); |
4024 // Call stub on end of buffer. | 4072 // Call stub on end of buffer. |
4025 // Check for end of buffer. | 4073 // Check for end of buffer. |
4026 ASSERT(StoreBuffer::kStoreBufferOverflowBit == | 4074 ASSERT(StoreBuffer::kStoreBufferOverflowBit == |
4027 (1 << (14 + kPointerSizeLog2))); | 4075 (1 << (14 + kPointerSizeLog2))); |
4028 if (and_then == kFallThroughAtEnd) { | 4076 if (and_then == kFallThroughAtEnd) { |
4029 Tbz(scratch, (14 + kPointerSizeLog2), &done); | 4077 Tbz(scratch1, (14 + kPointerSizeLog2), &done); |
4030 } else { | 4078 } else { |
4031 ASSERT(and_then == kReturnAtEnd); | 4079 ASSERT(and_then == kReturnAtEnd); |
4032 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow); | 4080 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); |
4033 Ret(); | 4081 Ret(); |
4034 } | 4082 } |
4035 | 4083 |
4036 Bind(&store_buffer_overflow); | 4084 Bind(&store_buffer_overflow); |
4037 Push(lr); | 4085 Push(lr); |
4038 StoreBufferOverflowStub store_buffer_overflow_stub = | 4086 StoreBufferOverflowStub store_buffer_overflow_stub = |
4039 StoreBufferOverflowStub(fp_mode); | 4087 StoreBufferOverflowStub(fp_mode); |
4040 CallStub(&store_buffer_overflow_stub); | 4088 CallStub(&store_buffer_overflow_stub); |
4041 Pop(lr); | 4089 Pop(lr); |
4042 | 4090 |
(...skipping 127 matching lines...)
4170 | 4218 |
4171 // Clobber clobbered input registers when running with the debug-code flag | 4219 // Clobber clobbered input registers when running with the debug-code flag |
4172 // turned on to provoke errors. | 4220 // turned on to provoke errors. |
4173 if (emit_debug_code()) { | 4221 if (emit_debug_code()) { |
4174 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); | 4222 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); |
4175 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); | 4223 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); |
4176 } | 4224 } |
4177 } | 4225 } |
4178 | 4226 |
4179 | 4227 |
4180 // Will clobber: object, address, value, Tmp0(), Tmp1(). | 4228 // Will clobber: object, address, value. |
4181 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. | 4229 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. |
4182 // | 4230 // |
4183 // The register 'object' contains a heap object pointer. The heap object tag is | 4231 // The register 'object' contains a heap object pointer. The heap object tag is |
4184 // shifted away. | 4232 // shifted away. |
4185 void MacroAssembler::RecordWrite(Register object, | 4233 void MacroAssembler::RecordWrite(Register object, |
4186 Register address, | 4234 Register address, |
4187 Register value, | 4235 Register value, |
4188 LinkRegisterStatus lr_status, | 4236 LinkRegisterStatus lr_status, |
4189 SaveFPRegsMode fp_mode, | 4237 SaveFPRegsMode fp_mode, |
4190 RememberedSetAction remembered_set_action, | 4238 RememberedSetAction remembered_set_action, |
4191 SmiCheck smi_check) { | 4239 SmiCheck smi_check) { |
4192 ASM_LOCATION("MacroAssembler::RecordWrite"); | 4240 ASM_LOCATION("MacroAssembler::RecordWrite"); |
4193 ASSERT(!AreAliased(object, value)); | 4241 ASSERT(!AreAliased(object, value)); |
4194 | 4242 |
4195 if (emit_debug_code()) { | 4243 if (emit_debug_code()) { |
4196 Ldr(Tmp0(), MemOperand(address)); | 4244 UseScratchRegisterScope temps(this); |
4197 Cmp(Tmp0(), value); | 4245 Register temp = temps.AcquireX(); |
| 4246 |
| 4247 Ldr(temp, MemOperand(address)); |
| 4248 Cmp(temp, value); |
4198 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 4249 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
4199 } | 4250 } |
4200 | 4251 |
4201 // Count number of write barriers in generated code. | 4252 // Count number of write barriers in generated code. |
4202 isolate()->counters()->write_barriers_static()->Increment(); | 4253 isolate()->counters()->write_barriers_static()->Increment(); |
4203 // TODO(mstarzinger): Dynamic counter missing. | 4254 // TODO(mstarzinger): Dynamic counter missing. |
4204 | 4255 |
4205 // First, check if a write barrier is even needed. The tests below | 4256 // First, check if a write barrier is even needed. The tests below |
4206 // catch stores of smis and stores into the young generation. | 4257 // catch stores of smis and stores into the young generation. |
4207 Label done; | 4258 Label done; |
(...skipping 44 matching lines...)
4252 Tbz(reg, 1, &color_is_valid); | 4303 Tbz(reg, 1, &color_is_valid); |
4253 Abort(kUnexpectedColorFound); | 4304 Abort(kUnexpectedColorFound); |
4254 Bind(&color_is_valid); | 4305 Bind(&color_is_valid); |
4255 } | 4306 } |
4256 } | 4307 } |
4257 | 4308 |
4258 | 4309 |
4259 void MacroAssembler::GetMarkBits(Register addr_reg, | 4310 void MacroAssembler::GetMarkBits(Register addr_reg, |
4260 Register bitmap_reg, | 4311 Register bitmap_reg, |
4261 Register shift_reg) { | 4312 Register shift_reg) { |
4262 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg)); | 4313 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); |
| 4314 ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); |
4263 // addr_reg is divided into fields: | 4315 // addr_reg is divided into fields: |
4264 // |63 page base 20|19 high 8|7 shift 3|2 0| | 4316 // |63 page base 20|19 high 8|7 shift 3|2 0| |
4265 // 'high' gives the index of the cell holding color bits for the object. | 4317 // 'high' gives the index of the cell holding color bits for the object. |
4266 // 'shift' gives the offset in the cell for this object's color. | 4318 // 'shift' gives the offset in the cell for this object's color. |
4267 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 4319 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
4268 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | 4320 UseScratchRegisterScope temps(this); |
| 4321 Register temp = temps.AcquireX(); |
| 4322 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); |
4269 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); | 4323 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); |
4270 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2)); | 4324 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); |
4271 // bitmap_reg: | 4325 // bitmap_reg: |
4272 // |63 page base 20|19 zeros 15|14 high 3|2 0| | 4326 // |63 page base 20|19 zeros 15|14 high 3|2 0| |
4273 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 4327 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
4274 } | 4328 } |
4275 | 4329 |
4276 | 4330 |
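The same decomposition in C++, with the constants this sketch assumes spelled out (kPointerSizeLog2 == 3, Bitmap::kBitsPerCellLog2 == 5, Bitmap::kBytesPerCellLog2 == 2, kPageSizeBits == 20):

    #include <cstdint>

    void GetMarkBits(uintptr_t addr, uintptr_t* cell_address, int* shift) {
      const int kShiftBits = 3 + 5;                         // ptr + cell log2
      uintptr_t page = addr & ~((uintptr_t{1} << 20) - 1);  // Bic
      uintptr_t cell = (addr >> kShiftBits)                 // Ubfx 'high'
                       & ((uintptr_t{1} << (20 - kShiftBits)) - 1);
      *cell_address = page + (cell << 2);                   // Add ... LSL #2
      *shift = static_cast<int>((addr >> 3) & 31);          // Ubfx 'shift'
    }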
4277 void MacroAssembler::HasColor(Register object, | 4331 void MacroAssembler::HasColor(Register object, |
4278 Register bitmap_scratch, | 4332 Register bitmap_scratch, |
4279 Register shift_scratch, | 4333 Register shift_scratch, |
4280 Label* has_color, | 4334 Label* has_color, |
(...skipping 203 matching lines...)
4484 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { | 4538 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { |
4485 if (emit_debug_code()) { | 4539 if (emit_debug_code()) { |
4486 CheckRegisterIsClear(reg, reason); | 4540 CheckRegisterIsClear(reg, reason); |
4487 } | 4541 } |
4488 } | 4542 } |
4489 | 4543 |
4490 | 4544 |
4491 void MacroAssembler::AssertRegisterIsRoot(Register reg, | 4545 void MacroAssembler::AssertRegisterIsRoot(Register reg, |
4492 Heap::RootListIndex index, | 4546 Heap::RootListIndex index, |
4493 BailoutReason reason) { | 4547 BailoutReason reason) { |
4494 // CompareRoot uses Tmp0(). | |
4495 ASSERT(!reg.Is(Tmp0())); | |
4496 if (emit_debug_code()) { | 4548 if (emit_debug_code()) { |
4497 CompareRoot(reg, index); | 4549 CompareRoot(reg, index); |
4498 Check(eq, reason); | 4550 Check(eq, reason); |
4499 } | 4551 } |
4500 } | 4552 } |
4501 | 4553 |
4502 | 4554 |
4503 void MacroAssembler::AssertFastElements(Register elements) { | 4555 void MacroAssembler::AssertFastElements(Register elements) { |
4504 if (emit_debug_code()) { | 4556 if (emit_debug_code()) { |
4505 Register temp = Tmp1(); | 4557 UseScratchRegisterScope temps(this); |
| 4558 Register temp = temps.AcquireX(); |
4506 Label ok; | 4559 Label ok; |
4507 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); | 4560 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); |
4508 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); | 4561 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); |
4509 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); | 4562 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); |
4510 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); | 4563 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); |
4511 Abort(kJSObjectWithFastElementsMapHasSlowElements); | 4564 Abort(kJSObjectWithFastElementsMapHasSlowElements); |
4512 Bind(&ok); | 4565 Bind(&ok); |
4513 } | 4566 } |
4514 } | 4567 } |
4515 | 4568 |
4516 | 4569 |
4517 void MacroAssembler::AssertIsString(const Register& object) { | 4570 void MacroAssembler::AssertIsString(const Register& object) { |
4518 if (emit_debug_code()) { | 4571 if (emit_debug_code()) { |
4519 Register temp = Tmp1(); | 4572 UseScratchRegisterScope temps(this); |
| 4573 Register temp = temps.AcquireX(); |
4520 STATIC_ASSERT(kSmiTag == 0); | 4574 STATIC_ASSERT(kSmiTag == 0); |
4521 Tst(object, Operand(kSmiTagMask)); | 4575 Tst(object, Operand(kSmiTagMask)); |
4522 Check(ne, kOperandIsNotAString); | 4576 Check(ne, kOperandIsNotAString); |
4523 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 4577 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
4524 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | 4578 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); |
4525 Check(lo, kOperandIsNotAString); | 4579 Check(lo, kOperandIsNotAString); |
4526 } | 4580 } |
4527 } | 4581 } |
4528 | 4582 |
4529 | 4583 |
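The Tst/Check pair above relies on the smi tagging scheme: assuming kSmiTag == 0 and kSmiTagMask == 1, Tst(object, kSmiTagMask) sets Z exactly when the value is a smi, so Check(ne, kOperandIsNotAString) aborts on smis before the map load can dereference a non-pointer.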
(...skipping 26 matching lines...)
4556 } | 4610 } |
4557 #endif | 4611 #endif |
4558 | 4612 |
4559 // Abort is used in some contexts where csp is the stack pointer. In order to | 4613 // Abort is used in some contexts where csp is the stack pointer. In order to |
4560 // simplify the CallRuntime code, make sure that jssp is the stack pointer. | 4614 // simplify the CallRuntime code, make sure that jssp is the stack pointer. |
4561 // There is no risk of register corruption here because Abort doesn't return. | 4615 // There is no risk of register corruption here because Abort doesn't return. |
4562 Register old_stack_pointer = StackPointer(); | 4616 Register old_stack_pointer = StackPointer(); |
4563 SetStackPointer(jssp); | 4617 SetStackPointer(jssp); |
4564 Mov(jssp, old_stack_pointer); | 4618 Mov(jssp, old_stack_pointer); |
4565 | 4619 |
| 4620 // We need some scratch registers for the MacroAssembler, so make sure we have |
| 4621 // some. This is safe here because Abort never returns. |
| 4622 RegList old_tmp_list = TmpList()->list(); |
| 4623 TmpList()->Combine(ip0); |
| 4624 TmpList()->Combine(ip1); |
| 4625 |
4566 if (use_real_aborts()) { | 4626 if (use_real_aborts()) { |
4567 // Avoid infinite recursion; Push contains some assertions that use Abort. | 4627 // Avoid infinite recursion; Push contains some assertions that use Abort. |
4568 NoUseRealAbortsScope no_real_aborts(this); | 4628 NoUseRealAbortsScope no_real_aborts(this); |
4569 | 4629 |
4570 Mov(x0, Operand(Smi::FromInt(reason))); | 4630 Mov(x0, Operand(Smi::FromInt(reason))); |
4571 Push(x0); | 4631 Push(x0); |
4572 | 4632 |
4573 if (!has_frame_) { | 4633 if (!has_frame_) { |
4574 // We don't actually want to generate a pile of code for this, so just | 4634 // We don't actually want to generate a pile of code for this, so just |
4575 // claim there is a stack frame, without generating one. | 4635 // claim there is a stack frame, without generating one. |
(...skipping 16 matching lines...)
4592 | 4652 |
4593 // Emit the message string directly in the instruction stream. | 4653 // Emit the message string directly in the instruction stream. |
4594 { | 4654 { |
4595 BlockPoolsScope scope(this); | 4655 BlockPoolsScope scope(this); |
4596 Bind(&msg_address); | 4656 Bind(&msg_address); |
4597 EmitStringData(GetBailoutReason(reason)); | 4657 EmitStringData(GetBailoutReason(reason)); |
4598 } | 4658 } |
4599 } | 4659 } |
4600 | 4660 |
4601 SetStackPointer(old_stack_pointer); | 4661 SetStackPointer(old_stack_pointer); |
| 4662 TmpList()->set_list(old_tmp_list); |
4602 } | 4663 } |
4603 | 4664 |
4604 | 4665 |
4605 void MacroAssembler::LoadTransitionedArrayMapConditional( | 4666 void MacroAssembler::LoadTransitionedArrayMapConditional( |
4606 ElementsKind expected_kind, | 4667 ElementsKind expected_kind, |
4607 ElementsKind transitioned_kind, | 4668 ElementsKind transitioned_kind, |
4608 Register map_in_out, | 4669 Register map_in_out, |
4609 Register scratch, | 4670 Register scratch1, |
| 4671 Register scratch2, |
4610 Label* no_map_match) { | 4672 Label* no_map_match) { |
4611 // Load the global or builtins object from the current context. | 4673 // Load the global or builtins object from the current context. |
4612 Ldr(scratch, GlobalObjectMemOperand()); | 4674 Ldr(scratch1, GlobalObjectMemOperand()); |
4613 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 4675 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); |
4614 | 4676 |
4615 // Check that the function's map is the same as the expected cached map. | 4677 // Check that the function's map is the same as the expected cached map. |
4616 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX)); | 4678 Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX)); |
4617 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4679 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
4618 Ldr(Tmp0(), FieldMemOperand(scratch, offset)); | 4680 Ldr(scratch2, FieldMemOperand(scratch1, offset)); |
4619 Cmp(map_in_out, Tmp0()); | 4681 Cmp(map_in_out, scratch2); |
4620 B(ne, no_map_match); | 4682 B(ne, no_map_match); |
4621 | 4683 |
4622 // Use the transitioned cached map. | 4684 // Use the transitioned cached map. |
4623 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; | 4685 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize; |
4624 Ldr(map_in_out, FieldMemOperand(scratch, offset)); | 4686 Ldr(map_in_out, FieldMemOperand(scratch1, offset)); |
4625 } | 4687 } |
4626 | 4688 |
4627 | 4689 |
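Worked example for the offset computation above, assuming kPointerSize == 8 and FixedArrayBase::kHeaderSize == 16: the cached map for elements kind 0 sits at byte offset 16 into the JS_ARRAY_MAPS_INDEX array, and each successive ElementsKind adds another 8 bytes.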
4628 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 4690 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
4629 // Load the global or builtins object from the current context. | 4691 // Load the global or builtins object from the current context. |
4630 Ldr(function, GlobalObjectMemOperand()); | 4692 Ldr(function, GlobalObjectMemOperand()); |
4631 // Load the native context from the global or builtins object. | 4693 // Load the native context from the global or builtins object. |
4632 Ldr(function, FieldMemOperand(function, | 4694 Ldr(function, FieldMemOperand(function, |
4633 GlobalObject::kNativeContextOffset)); | 4695 GlobalObject::kNativeContextOffset)); |
4634 // Load the function from the native context. | 4696 // Load the function from the native context. |
(...skipping 21 matching lines...)
4656 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. | 4718 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. |
4657 void MacroAssembler::PrintfNoPreserve(const char * format, | 4719 void MacroAssembler::PrintfNoPreserve(const char * format, |
4658 const CPURegister& arg0, | 4720 const CPURegister& arg0, |
4659 const CPURegister& arg1, | 4721 const CPURegister& arg1, |
4660 const CPURegister& arg2, | 4722 const CPURegister& arg2, |
4661 const CPURegister& arg3) { | 4723 const CPURegister& arg3) { |
4662 // We cannot handle a caller-saved stack pointer. It doesn't make much sense | 4724 // We cannot handle a caller-saved stack pointer. It doesn't make much sense |
4663 // in most cases anyway, so this restriction shouldn't be too serious. | 4725 // in most cases anyway, so this restriction shouldn't be too serious. |
4664 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); | 4726 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); |
4665 | 4727 |
4666 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro | 4728 // Make sure that the macro assembler doesn't try to use any of our arguments |
4667 // assembler. We cannot print the stack pointer because it is typically used | 4729 // as scratch registers. |
4668 // to preserve caller-saved registers (using other Printf variants which | 4730 ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); |
4669 // depend on this helper). | 4731 ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3)); |
4670 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0)); | 4732 |
4671 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1)); | 4733 // We cannot print the stack pointer because it is typically used to preserve |
4672 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2)); | 4734 // caller-saved registers (using other Printf variants which depend on this |
4673 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3)); | 4735 // helper). |
| 4736 ASSERT(!AreAliased(arg0, StackPointer())); |
| 4737 ASSERT(!AreAliased(arg1, StackPointer())); |
| 4738 ASSERT(!AreAliased(arg2, StackPointer())); |
| 4739 ASSERT(!AreAliased(arg3, StackPointer())); |
4674 | 4740 |
4675 static const int kMaxArgCount = 4; | 4741 static const int kMaxArgCount = 4; |
4676 // Assume that we have the maximum number of arguments until we know | 4742 // Assume that we have the maximum number of arguments until we know |
4677 // otherwise. | 4743 // otherwise. |
4678 int arg_count = kMaxArgCount; | 4744 int arg_count = kMaxArgCount; |
4679 | 4745 |
4680 // The provided arguments. | 4746 // The provided arguments. |
4681 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; | 4747 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3}; |
4682 | 4748 |
4683 // The PCS registers where the arguments need to end up. | 4749 // The PCS registers where the arguments need to end up. |
(...skipping 121 matching lines...)
4805 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); | 4871 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); |
4806 #endif | 4872 #endif |
4807 } | 4873 } |
4808 | 4874 |
4809 | 4875 |
4810 void MacroAssembler::Printf(const char * format, | 4876 void MacroAssembler::Printf(const char * format, |
4811 const CPURegister& arg0, | 4877 const CPURegister& arg0, |
4812 const CPURegister& arg1, | 4878 const CPURegister& arg1, |
4813 const CPURegister& arg2, | 4879 const CPURegister& arg2, |
4814 const CPURegister& arg3) { | 4880 const CPURegister& arg3) { |
| 4881 // Printf is expected to preserve all registers, so make sure that none are |
| 4882 // available as scratch registers until we've preserved them. |
| 4883 RegList old_tmp_list = TmpList()->list(); |
| 4884 RegList old_fp_tmp_list = FPTmpList()->list(); |
| 4885 TmpList()->set_list(0); |
| 4886 FPTmpList()->set_list(0); |
| 4887 |
4815 // Preserve all caller-saved registers as well as NZCV. | 4888 // Preserve all caller-saved registers as well as NZCV. |
4816 // If csp is the stack pointer, PushCPURegList asserts that the size of each | 4889 // If csp is the stack pointer, PushCPURegList asserts that the size of each |
4817 // list is a multiple of 16 bytes. | 4890 // list is a multiple of 16 bytes. |
4818 PushCPURegList(kCallerSaved); | 4891 PushCPURegList(kCallerSaved); |
4819 PushCPURegList(kCallerSavedFP); | 4892 PushCPURegList(kCallerSavedFP); |
4820 // Use Tmp0() as a scratch register. It is not accepted by Printf so it will | 4893 |
4821 // never overlap an argument register. | 4894 // We can use caller-saved registers as scratch values (except for argN). |
4822 Mrs(Tmp0(), NZCV); | 4895 CPURegList tmp_list = kCallerSaved; |
4823 Push(Tmp0(), xzr); | 4896 CPURegList fp_tmp_list = kCallerSavedFP; |
| 4897 tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4898 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4899 TmpList()->set_list(tmp_list.list()); |
| 4900 FPTmpList()->set_list(fp_tmp_list.list()); |
| 4901 |
| 4902 // Preserve NZCV. |
| 4903 { UseScratchRegisterScope temps(this); |
| 4904 Register tmp = temps.AcquireX(); |
| 4905 Mrs(tmp, NZCV); |
| 4906 Push(tmp, xzr); |
| 4907 } |
4824 | 4908 |
4825 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | 4909 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); |
4826 | 4910 |
4827 Pop(xzr, Tmp0()); | 4911 { UseScratchRegisterScope temps(this); |
4828 Msr(NZCV, Tmp0()); | 4912 Register tmp = temps.AcquireX(); |
| 4913 Pop(xzr, tmp); |
| 4914 Msr(NZCV, tmp); |
| 4915 } |
| 4916 |
4829 PopCPURegList(kCallerSavedFP); | 4917 PopCPURegList(kCallerSavedFP); |
4830 PopCPURegList(kCallerSaved); | 4918 PopCPURegList(kCallerSaved); |
| 4919 |
| 4920 TmpList()->set_list(old_tmp_list); |
| 4921 FPTmpList()->set_list(old_fp_tmp_list); |
4831 } | 4922 } |
4832 | 4923 |
4833 | 4924 |
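A hypothetical Printf call site; the format string is passed straight through to printf, so the usual specifiers apply (PRIx64 assumes the <inttypes.h> macros):

    __ Printf("x0 = 0x%016" PRIx64 ", d0 = %f\n", x0, d0);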
4834 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { | 4925 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { |
4835 // TODO(jbramley): Other architectures use the internal memcpy to copy the | 4926 // TODO(jbramley): Other architectures use the internal memcpy to copy the |
4836 // sequence. If this is a performance bottleneck, we should consider caching | 4927 // sequence. If this is a performance bottleneck, we should consider caching |
4837 // the sequence and copying it in the same way. | 4928 // the sequence and copying it in the same way. |
4838 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); | 4929 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize); |
4839 ASSERT(jssp.Is(StackPointer())); | 4930 ASSERT(jssp.Is(StackPointer())); |
4840 EmitFrameSetupForCodeAgePatching(this); | 4931 EmitFrameSetupForCodeAgePatching(this); |
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4928 initialized = true; | 5019 initialized = true; |
4929 } | 5020 } |
4930 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; | 5021 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0; |
4931 } | 5022 } |
4932 #endif | 5023 #endif |
4933 | 5024 |
4934 | 5025 |
4935 void MacroAssembler::FlooringDiv(Register result, | 5026 void MacroAssembler::FlooringDiv(Register result, |
4936 Register dividend, | 5027 Register dividend, |
4937 int32_t divisor) { | 5028 int32_t divisor) { |
4938 Register tmp = WTmp0(); | 5029 ASSERT(!AreAliased(result, dividend)); |
4939 ASSERT(!AreAliased(result, dividend, tmp)); | |
4940 ASSERT(result.Is32Bits() && dividend.Is32Bits()); | 5030 ASSERT(result.Is32Bits() && dividend.Is32Bits()); |
4941 MultiplierAndShift ms(divisor); | 5031 MultiplierAndShift ms(divisor); |
4942 Mov(tmp, Operand(ms.multiplier())); | 5032 Mov(result, Operand(ms.multiplier())); |
4943 Smull(result.X(), dividend, tmp); | 5033 Smull(result.X(), dividend, result); |
4944 Asr(result.X(), result.X(), 32); | 5034 Asr(result.X(), result.X(), 32); |
4945 if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend); | 5035 if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend); |
4946 if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend); | 5036 if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend); |
4947 if (ms.shift() > 0) Asr(result, result, ms.shift()); | 5037 if (ms.shift() > 0) Asr(result, result, ms.shift()); |
4948 } | 5038 } |
4949 | 5039 |
4950 | 5040 |
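A worked example of the multiplier-and-shift scheme in FlooringDiv() above, taking divisor == 3, for which the usual magic-number tables give multiplier 0x55555556 and shift 0 (treat both values as assumptions about MultiplierAndShift):

    dividend =  7:   ( 7 * 0x55555556) >> 32  =  2  ==  floor( 7 / 3)
    dividend = -7:   (-7 * 0x55555556) >> 32  = -3  ==  floor(-7 / 3)

Since the divisor and the multiplier are both positive here, neither corrective Add nor Sub fires, and the shift of 0 skips the final Asr.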
4951 #undef __ | 5041 #undef __ |
| 5042 |
| 5043 |
| 5044 UseScratchRegisterScope::~UseScratchRegisterScope() { |
| 5045 available_->set_list(old_available_); |
| 5046 availablefp_->set_list(old_availablefp_); |
| 5047 } |
| 5048 |
| 5049 |
| 5050 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { |
| 5051 int code = AcquireNextAvailable(available_).code(); |
| 5052 return Register::Create(code, reg.SizeInBits()); |
| 5053 } |
| 5054 |
| 5055 |
| 5056 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { |
| 5057 int code = AcquireNextAvailable(availablefp_).code(); |
| 5058 return FPRegister::Create(code, reg.SizeInBits()); |
| 5059 } |
| 5060 |
| 5061 |
| 5062 CPURegister UseScratchRegisterScope::AcquireNextAvailable( |
| 5063 CPURegList* available) { |
| 5064 CHECK(!available->IsEmpty()); |
| 5065 CPURegister result = available->PopLowestIndex(); |
| 5066 ASSERT(!AreAliased(result, xzr, csp)); |
| 5067 return result; |
| 5068 } |
| 5069 |
| 5070 |
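The usage pattern the scope above is designed for, as it appears throughout this file:

    {
      UseScratchRegisterScope temps(this);   // snapshots TmpList/FPTmpList
      Register temp = temps.AcquireX();      // pops e.g. ip0 from the pool
      FPRegister ftemp = temps.AcquireSameSizeAs(fp_scratch);
      // ... emit code using temp / ftemp ...
    }   // the destructor restores both lists, releasing the registers

Because the destructor restores the saved lists wholesale, scopes nest safely and a register can never stay leased past the block that acquired it.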
4952 #define __ masm-> | 5071 #define __ masm-> |
4953 | 5072 |
4954 | 5073 |
4955 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, | 5074 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, |
4956 const Label* smi_check) { | 5075 const Label* smi_check) { |
4957 Assembler::BlockPoolsScope scope(masm); | 5076 Assembler::BlockPoolsScope scope(masm); |
4958 if (reg.IsValid()) { | 5077 if (reg.IsValid()) { |
4959 ASSERT(smi_check->is_bound()); | 5078 ASSERT(smi_check->is_bound()); |
4960 ASSERT(reg.Is64Bits()); | 5079 ASSERT(reg.Is64Bits()); |
4961 | 5080 |
(...skipping 31 matching lines...)
4993 } | 5112 } |
4994 } | 5113 } |
4995 | 5114 |
4996 | 5115 |
4997 #undef __ | 5116 #undef __ |
4998 | 5117 |
4999 | 5118 |
5000 } } // namespace v8::internal | 5119 } } // namespace v8::internal |
5001 | 5120 |
5002 #endif // V8_TARGET_ARCH_A64 | 5121 #endif // V8_TARGET_ARCH_A64 |