OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 61 matching lines...)
72 return CPURegList(fp_scratch1, fp_scratch2); | 72 return CPURegList(fp_scratch1, fp_scratch2); |
73 } | 73 } |
74 | 74 |
75 | 75 |
76 void MacroAssembler::LogicalMacro(const Register& rd, | 76 void MacroAssembler::LogicalMacro(const Register& rd, |
77 const Register& rn, | 77 const Register& rn, |
78 const Operand& operand, | 78 const Operand& operand, |
79 LogicalOp op) { | 79 LogicalOp op) { |
80 UseScratchRegisterScope temps(this); | 80 UseScratchRegisterScope temps(this); |
81 | 81 |
82 if (operand.NeedsRelocation()) { | 82 if (operand.NeedsRelocation(isolate())) { |
83 Register temp = temps.AcquireX(); | 83 Register temp = temps.AcquireX(); |
84 LoadRelocated(temp, operand); | 84 LoadRelocated(temp, operand); |
85 Logical(rd, rn, temp, op); | 85 Logical(rd, rn, temp, op); |
86 | 86 |
87 } else if (operand.IsImmediate()) { | 87 } else if (operand.IsImmediate()) { |
88 int64_t immediate = operand.immediate(); | 88 int64_t immediate = operand.immediate(); |
89 unsigned reg_size = rd.SizeInBits(); | 89 unsigned reg_size = rd.SizeInBits(); |
90 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 90 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
91 | 91 |
92 // If the operation is NOT, invert the operation and immediate. | 92 // If the operation is NOT, invert the operation and immediate. |
(...skipping 170 matching lines...)
263 const Operand& operand, | 263 const Operand& operand, |
264 DiscardMoveMode discard_mode) { | 264 DiscardMoveMode discard_mode) { |
265 ASSERT(allow_macro_instructions_); | 265 ASSERT(allow_macro_instructions_); |
266 ASSERT(!rd.IsZero()); | 266 ASSERT(!rd.IsZero()); |
267 | 267 |
268 // Provide a swap register for instructions that need to write into the | 268 // Provide a swap register for instructions that need to write into the |
269 // system stack pointer (and can't do this inherently). | 269 // system stack pointer (and can't do this inherently). |
270 UseScratchRegisterScope temps(this); | 270 UseScratchRegisterScope temps(this); |
271 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; | 271 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; |
272 | 272 |
273 if (operand.NeedsRelocation()) { | 273 if (operand.NeedsRelocation(isolate())) { |
274 LoadRelocated(dst, operand); | 274 LoadRelocated(dst, operand); |
275 | 275 |
276 } else if (operand.IsImmediate()) { | 276 } else if (operand.IsImmediate()) { |
277 // Call the macro assembler for generic immediates. | 277 // Call the macro assembler for generic immediates. |
278 Mov(dst, operand.immediate()); | 278 Mov(dst, operand.immediate()); |
279 | 279 |
280 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 280 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
281 // Emit a shift instruction if moving a shifted register. This operation | 281 // Emit a shift instruction if moving a shifted register. This operation |
282 // could also be achieved using an orr instruction (like orn used by Mvn), | 282 // could also be achieved using an orr instruction (like orn used by Mvn), |
283 // but using a shift instruction makes the disassembly clearer. | 283 // but using a shift instruction makes the disassembly clearer. |
(...skipping 27 matching lines...)
311 if (!dst.Is(rd)) { | 311 if (!dst.Is(rd)) { |
312 ASSERT(rd.IsSP()); | 312 ASSERT(rd.IsSP()); |
313 Assembler::mov(rd, dst); | 313 Assembler::mov(rd, dst); |
314 } | 314 } |
315 } | 315 } |
316 | 316 |
317 | 317 |
318 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 318 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
319 ASSERT(allow_macro_instructions_); | 319 ASSERT(allow_macro_instructions_); |
320 | 320 |
321 if (operand.NeedsRelocation()) { | 321 if (operand.NeedsRelocation(isolate())) { |
322 LoadRelocated(rd, operand); | 322 LoadRelocated(rd, operand); |
323 mvn(rd, rd); | 323 mvn(rd, rd); |
324 | 324 |
325 } else if (operand.IsImmediate()) { | 325 } else if (operand.IsImmediate()) { |
326 // Call the macro assembler for generic immediates. | 326 // Call the macro assembler for generic immediates. |
327 Mov(rd, ~operand.immediate()); | 327 Mov(rd, ~operand.immediate()); |
328 | 328 |
329 } else if (operand.IsExtendedRegister()) { | 329 } else if (operand.IsExtendedRegister()) { |
330 // Emit two instructions for the extend case. This differs from Mov, as | 330 // Emit two instructions for the extend case. This differs from Mov, as |
331 // the extend and invert can't be achieved in one instruction. | 331 // the extend and invert can't be achieved in one instruction. |
(...skipping 34 matching lines...)
366 return IsImmMovz(~imm, reg_size); | 366 return IsImmMovz(~imm, reg_size); |
367 } | 367 } |
368 | 368 |
369 | 369 |
370 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | 370 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
371 const Operand& operand, | 371 const Operand& operand, |
372 StatusFlags nzcv, | 372 StatusFlags nzcv, |
373 Condition cond, | 373 Condition cond, |
374 ConditionalCompareOp op) { | 374 ConditionalCompareOp op) { |
375 ASSERT((cond != al) && (cond != nv)); | 375 ASSERT((cond != al) && (cond != nv)); |
376 if (operand.NeedsRelocation()) { | 376 if (operand.NeedsRelocation(isolate())) { |
377 UseScratchRegisterScope temps(this); | 377 UseScratchRegisterScope temps(this); |
378 Register temp = temps.AcquireX(); | 378 Register temp = temps.AcquireX(); |
379 LoadRelocated(temp, operand); | 379 LoadRelocated(temp, operand); |
380 ConditionalCompareMacro(rn, temp, nzcv, cond, op); | 380 ConditionalCompareMacro(rn, temp, nzcv, cond, op); |
381 | 381 |
382 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | 382 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
383 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | 383 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
384 // The immediate can be encoded in the instruction, or the operand is an | 384 // The immediate can be encoded in the instruction, or the operand is an |
385 // unshifted register: call the assembler. | 385 // unshifted register: call the assembler. |
386 ConditionalCompare(rn, operand, nzcv, cond, op); | 386 ConditionalCompare(rn, operand, nzcv, cond, op); |
(...skipping 45 matching lines...)
432 } | 432 } |
433 } | 433 } |
434 | 434 |
435 | 435 |
436 void MacroAssembler::AddSubMacro(const Register& rd, | 436 void MacroAssembler::AddSubMacro(const Register& rd, |
437 const Register& rn, | 437 const Register& rn, |
438 const Operand& operand, | 438 const Operand& operand, |
439 FlagsUpdate S, | 439 FlagsUpdate S, |
440 AddSubOp op) { | 440 AddSubOp op) { |
441 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 441 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
442 !operand.NeedsRelocation() && (S == LeaveFlags)) { | 442 !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) { |
443 // The instruction would be a nop. Avoid generating useless code. | 443 // The instruction would be a nop. Avoid generating useless code. |
444 return; | 444 return; |
445 } | 445 } |
446 | 446 |
447 if (operand.NeedsRelocation()) { | 447 if (operand.NeedsRelocation(isolate())) { |
448 UseScratchRegisterScope temps(this); | 448 UseScratchRegisterScope temps(this); |
449 Register temp = temps.AcquireX(); | 449 Register temp = temps.AcquireX(); |
450 LoadRelocated(temp, operand); | 450 LoadRelocated(temp, operand); |
451 AddSubMacro(rd, rn, temp, S, op); | 451 AddSubMacro(rd, rn, temp, S, op); |
452 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | 452 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
453 (rn.IsZero() && !operand.IsShiftedRegister()) || | 453 (rn.IsZero() && !operand.IsShiftedRegister()) || |
454 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 454 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
455 UseScratchRegisterScope temps(this); | 455 UseScratchRegisterScope temps(this); |
456 Register temp = temps.AcquireSameSizeAs(rn); | 456 Register temp = temps.AcquireSameSizeAs(rn); |
457 Mov(temp, operand); | 457 Mov(temp, operand); |
458 AddSub(rd, rn, temp, S, op); | 458 AddSub(rd, rn, temp, S, op); |
459 } else { | 459 } else { |
460 AddSub(rd, rn, operand, S, op); | 460 AddSub(rd, rn, operand, S, op); |
461 } | 461 } |
462 } | 462 } |
463 | 463 |
464 | 464 |
465 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 465 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
466 const Register& rn, | 466 const Register& rn, |
467 const Operand& operand, | 467 const Operand& operand, |
468 FlagsUpdate S, | 468 FlagsUpdate S, |
469 AddSubWithCarryOp op) { | 469 AddSubWithCarryOp op) { |
470 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 470 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
471 UseScratchRegisterScope temps(this); | 471 UseScratchRegisterScope temps(this); |
472 | 472 |
473 if (operand.NeedsRelocation()) { | 473 if (operand.NeedsRelocation(isolate())) { |
474 Register temp = temps.AcquireX(); | 474 Register temp = temps.AcquireX(); |
475 LoadRelocated(temp, operand); | 475 LoadRelocated(temp, operand); |
476 AddSubWithCarryMacro(rd, rn, temp, S, op); | 476 AddSubWithCarryMacro(rd, rn, temp, S, op); |
477 | 477 |
478 } else if (operand.IsImmediate() || | 478 } else if (operand.IsImmediate() || |
479 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 479 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
480 // Add/sub with carry (immediate or ROR shifted register.) | 480 // Add/sub with carry (immediate or ROR shifted register.) |
481 Register temp = temps.AcquireSameSizeAs(rn); | 481 Register temp = temps.AcquireSameSizeAs(rn); |
482 Mov(temp, operand); | 482 Mov(temp, operand); |
483 AddSubWithCarry(rd, rn, temp, S, op); | 483 AddSubWithCarry(rd, rn, temp, S, op); |
(...skipping 4754 matching lines...)
5238 } | 5238 } |
5239 } | 5239 } |
5240 | 5240 |
5241 | 5241 |
5242 #undef __ | 5242 #undef __ |
5243 | 5243 |
5244 | 5244 |
5245 } } // namespace v8::internal | 5245 } } // namespace v8::internal |
5246 | 5246 |
5247 #endif // V8_TARGET_ARCH_ARM64 | 5247 #endif // V8_TARGET_ARCH_ARM64 |
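Note: the only functional change in the hunks above is that Operand::NeedsRelocation is now passed the isolate at every call site (LogicalMacro, Mov, Mvn, ConditionalCompareMacro, AddSubMacro, AddSubWithCarryMacro), presumably so the relocation check can consult per-isolate state instead of a global. A minimal sketch of the assumed declaration change, for orientation only; the real signature lives in the arm64 assembler headers, which are not part of this diff:

    // Before (assumed): relocation check with no isolate context.
    bool NeedsRelocation() const;

    // After (assumed): call sites now pass MacroAssembler::isolate() explicitly.
    bool NeedsRelocation(Isolate* isolate) const;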