| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
| 8 | 8 |
| 9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
| 10 #include "codegen.h" | 10 #include "codegen.h" |
| (...skipping 38 matching lines...) |
| 49 return CPURegList(fp_scratch1, fp_scratch2); | 49 return CPURegList(fp_scratch1, fp_scratch2); |
| 50 } | 50 } |
| 51 | 51 |
| 52 | 52 |
| 53 void MacroAssembler::LogicalMacro(const Register& rd, | 53 void MacroAssembler::LogicalMacro(const Register& rd, |
| 54 const Register& rn, | 54 const Register& rn, |
| 55 const Operand& operand, | 55 const Operand& operand, |
| 56 LogicalOp op) { | 56 LogicalOp op) { |
| 57 UseScratchRegisterScope temps(this); | 57 UseScratchRegisterScope temps(this); |
| 58 | 58 |
| 59 if (operand.NeedsRelocation()) { | 59 if (operand.NeedsRelocation(isolate())) { |
| 60 Register temp = temps.AcquireX(); | 60 Register temp = temps.AcquireX(); |
| 61 LoadRelocated(temp, operand); | 61 LoadRelocated(temp, operand); |
| 62 Logical(rd, rn, temp, op); | 62 Logical(rd, rn, temp, op); |
| 63 | 63 |
| 64 } else if (operand.IsImmediate()) { | 64 } else if (operand.IsImmediate()) { |
| 65 int64_t immediate = operand.immediate(); | 65 int64_t immediate = operand.immediate(); |
| 66 unsigned reg_size = rd.SizeInBits(); | 66 unsigned reg_size = rd.SizeInBits(); |
| 67 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 67 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
| 68 | 68 |
| 69 // If the operation is NOT, invert the operation and immediate. | 69 // If the operation is NOT, invert the operation and immediate. |
| (...skipping 170 matching lines...) |
| 240 const Operand& operand, | 240 const Operand& operand, |
| 241 DiscardMoveMode discard_mode) { | 241 DiscardMoveMode discard_mode) { |
| 242 ASSERT(allow_macro_instructions_); | 242 ASSERT(allow_macro_instructions_); |
| 243 ASSERT(!rd.IsZero()); | 243 ASSERT(!rd.IsZero()); |
| 244 | 244 |
| 245 // Provide a swap register for instructions that need to write into the | 245 // Provide a swap register for instructions that need to write into the |
| 246 // system stack pointer (and can't do this inherently). | 246 // system stack pointer (and can't do this inherently). |
| 247 UseScratchRegisterScope temps(this); | 247 UseScratchRegisterScope temps(this); |
| 248 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; | 248 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; |
| 249 | 249 |
| 250 if (operand.NeedsRelocation()) { | 250 if (operand.NeedsRelocation(isolate())) { |
| 251 LoadRelocated(dst, operand); | 251 LoadRelocated(dst, operand); |
| 252 | 252 |
| 253 } else if (operand.IsImmediate()) { | 253 } else if (operand.IsImmediate()) { |
| 254 // Call the macro assembler for generic immediates. | 254 // Call the macro assembler for generic immediates. |
| 255 Mov(dst, operand.immediate()); | 255 Mov(dst, operand.immediate()); |
| 256 | 256 |
| 257 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 257 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
| 258 // Emit a shift instruction if moving a shifted register. This operation | 258 // Emit a shift instruction if moving a shifted register. This operation |
| 259 // could also be achieved using an orr instruction (like orn used by Mvn), | 259 // could also be achieved using an orr instruction (like orn used by Mvn), |
| 260 // but using a shift instruction makes the disassembly clearer. | 260 // but using a shift instruction makes the disassembly clearer. |
| (...skipping 27 matching lines...) |
| 288 if (!dst.Is(rd)) { | 288 if (!dst.Is(rd)) { |
| 289 ASSERT(rd.IsSP()); | 289 ASSERT(rd.IsSP()); |
| 290 Assembler::mov(rd, dst); | 290 Assembler::mov(rd, dst); |
| 291 } | 291 } |
| 292 } | 292 } |
| 293 | 293 |
| 294 | 294 |
| 295 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 295 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
| 296 ASSERT(allow_macro_instructions_); | 296 ASSERT(allow_macro_instructions_); |
| 297 | 297 |
| 298 if (operand.NeedsRelocation()) { | 298 if (operand.NeedsRelocation(isolate())) { |
| 299 LoadRelocated(rd, operand); | 299 LoadRelocated(rd, operand); |
| 300 mvn(rd, rd); | 300 mvn(rd, rd); |
| 301 | 301 |
| 302 } else if (operand.IsImmediate()) { | 302 } else if (operand.IsImmediate()) { |
| 303 // Call the macro assembler for generic immediates. | 303 // Call the macro assembler for generic immediates. |
| 304 Mov(rd, ~operand.immediate()); | 304 Mov(rd, ~operand.immediate()); |
| 305 | 305 |
| 306 } else if (operand.IsExtendedRegister()) { | 306 } else if (operand.IsExtendedRegister()) { |
| 307 // Emit two instructions for the extend case. This differs from Mov, as | 307 // Emit two instructions for the extend case. This differs from Mov, as |
| 308 // the extend and invert can't be achieved in one instruction. | 308 // the extend and invert can't be achieved in one instruction. |
| (...skipping 34 matching lines...) |
| 343 return IsImmMovz(~imm, reg_size); | 343 return IsImmMovz(~imm, reg_size); |
| 344 } | 344 } |
| 345 | 345 |
| 346 | 346 |
| 347 void MacroAssembler::ConditionalCompareMacro(const Register& rn, | 347 void MacroAssembler::ConditionalCompareMacro(const Register& rn, |
| 348 const Operand& operand, | 348 const Operand& operand, |
| 349 StatusFlags nzcv, | 349 StatusFlags nzcv, |
| 350 Condition cond, | 350 Condition cond, |
| 351 ConditionalCompareOp op) { | 351 ConditionalCompareOp op) { |
| 352 ASSERT((cond != al) && (cond != nv)); | 352 ASSERT((cond != al) && (cond != nv)); |
| 353 if (operand.NeedsRelocation()) { | 353 if (operand.NeedsRelocation(isolate())) { |
| 354 UseScratchRegisterScope temps(this); | 354 UseScratchRegisterScope temps(this); |
| 355 Register temp = temps.AcquireX(); | 355 Register temp = temps.AcquireX(); |
| 356 LoadRelocated(temp, operand); | 356 LoadRelocated(temp, operand); |
| 357 ConditionalCompareMacro(rn, temp, nzcv, cond, op); | 357 ConditionalCompareMacro(rn, temp, nzcv, cond, op); |
| 358 | 358 |
| 359 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || | 359 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || |
| 360 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { | 360 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) { |
| 361 // The immediate can be encoded in the instruction, or the operand is an | 361 // The immediate can be encoded in the instruction, or the operand is an |
| 362 // unshifted register: call the assembler. | 362 // unshifted register: call the assembler. |
| 363 ConditionalCompare(rn, operand, nzcv, cond, op); | 363 ConditionalCompare(rn, operand, nzcv, cond, op); |
| (...skipping 45 matching lines...) |
| 409 } | 409 } |
| 410 } | 410 } |
| 411 | 411 |
| 412 | 412 |
| 413 void MacroAssembler::AddSubMacro(const Register& rd, | 413 void MacroAssembler::AddSubMacro(const Register& rd, |
| 414 const Register& rn, | 414 const Register& rn, |
| 415 const Operand& operand, | 415 const Operand& operand, |
| 416 FlagsUpdate S, | 416 FlagsUpdate S, |
| 417 AddSubOp op) { | 417 AddSubOp op) { |
| 418 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 418 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
| 419 !operand.NeedsRelocation() && (S == LeaveFlags)) { | 419 !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) { |
| 420 // The instruction would be a nop. Avoid generating useless code. | 420 // The instruction would be a nop. Avoid generating useless code. |
| 421 return; | 421 return; |
| 422 } | 422 } |
| 423 | 423 |
| 424 if (operand.NeedsRelocation()) { | 424 if (operand.NeedsRelocation(isolate())) { |
| 425 UseScratchRegisterScope temps(this); | 425 UseScratchRegisterScope temps(this); |
| 426 Register temp = temps.AcquireX(); | 426 Register temp = temps.AcquireX(); |
| 427 LoadRelocated(temp, operand); | 427 LoadRelocated(temp, operand); |
| 428 AddSubMacro(rd, rn, temp, S, op); | 428 AddSubMacro(rd, rn, temp, S, op); |
| 429 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || | 429 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) || |
| 430 (rn.IsZero() && !operand.IsShiftedRegister()) || | 430 (rn.IsZero() && !operand.IsShiftedRegister()) || |
| 431 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 431 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 432 UseScratchRegisterScope temps(this); | 432 UseScratchRegisterScope temps(this); |
| 433 Register temp = temps.AcquireSameSizeAs(rn); | 433 Register temp = temps.AcquireSameSizeAs(rn); |
| 434 Mov(temp, operand); | 434 Mov(temp, operand); |
| 435 AddSub(rd, rn, temp, S, op); | 435 AddSub(rd, rn, temp, S, op); |
| 436 } else { | 436 } else { |
| 437 AddSub(rd, rn, operand, S, op); | 437 AddSub(rd, rn, operand, S, op); |
| 438 } | 438 } |
| 439 } | 439 } |
| 440 | 440 |
| 441 | 441 |
| 442 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 442 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
| 443 const Register& rn, | 443 const Register& rn, |
| 444 const Operand& operand, | 444 const Operand& operand, |
| 445 FlagsUpdate S, | 445 FlagsUpdate S, |
| 446 AddSubWithCarryOp op) { | 446 AddSubWithCarryOp op) { |
| 447 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 447 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 448 UseScratchRegisterScope temps(this); | 448 UseScratchRegisterScope temps(this); |
| 449 | 449 |
| 450 if (operand.NeedsRelocation()) { | 450 if (operand.NeedsRelocation(isolate())) { |
| 451 Register temp = temps.AcquireX(); | 451 Register temp = temps.AcquireX(); |
| 452 LoadRelocated(temp, operand); | 452 LoadRelocated(temp, operand); |
| 453 AddSubWithCarryMacro(rd, rn, temp, S, op); | 453 AddSubWithCarryMacro(rd, rn, temp, S, op); |
| 454 | 454 |
| 455 } else if (operand.IsImmediate() || | 455 } else if (operand.IsImmediate() || |
| 456 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 456 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
| 457 // Add/sub with carry (immediate or ROR shifted register.) | 457 // Add/sub with carry (immediate or ROR shifted register.) |
| 458 Register temp = temps.AcquireSameSizeAs(rn); | 458 Register temp = temps.AcquireSameSizeAs(rn); |
| 459 Mov(temp, operand); | 459 Mov(temp, operand); |
| 460 AddSubWithCarry(rd, rn, temp, S, op); | 460 AddSubWithCarry(rd, rn, temp, S, op); |
| (...skipping 4754 matching lines...) |
| 5215 } | 5215 } |
| 5216 } | 5216 } |
| 5217 | 5217 |
| 5218 | 5218 |
| 5219 #undef __ | 5219 #undef __ |
| 5220 | 5220 |
| 5221 | 5221 |
| 5222 } } // namespace v8::internal | 5222 } } // namespace v8::internal |
| 5223 | 5223 |
| 5224 #endif // V8_TARGET_ARCH_ARM64 | 5224 #endif // V8_TARGET_ARCH_ARM64 |
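
Context for the hunks above (not part of the CL itself): each of these macros dispatches on the operand in the same order. A relocatable operand is loaded into a scratch register first (`NeedsRelocation` now takes the isolate), an immediate that does not fit the instruction's encoding is materialized into a temp register, and anything else is handed straight to the raw assembler. Below is a minimal standalone sketch of that dispatch order; every name in it (`ToyOperand`, `FitsAddSubImmediate`, `EmitAddLike`) is invented here for illustration and is not V8 API.

```cpp
// Standalone sketch (not V8 code): the operand-dispatch pattern used by
// LogicalMacro / AddSubMacro / AddSubWithCarryMacro in the hunks above.
#include <cstdint>
#include <iostream>

struct ToyOperand {
  bool needs_relocation;   // e.g. an embedded object or external reference
  bool is_immediate;
  int64_t immediate;
};

// True if |imm| fits a 12-bit unsigned add/sub immediate field, optionally
// shifted left by 12 (mirrors the spirit of IsImmAddSub).
bool FitsAddSubImmediate(int64_t imm) {
  return (imm >= 0) &&
         (((imm & ~0xFFFLL) == 0) || ((imm & ~(0xFFFLL << 12)) == 0));
}

void EmitAddLike(const ToyOperand& operand) {
  if (operand.needs_relocation) {
    // The value is patched later, so it must live in a register: load it
    // into a scratch register first, then re-enter with a register operand.
    std::cout << "load relocated value into scratch, then add reg\n";
  } else if (operand.is_immediate && !FitsAddSubImmediate(operand.immediate)) {
    // Immediate does not encode: materialize it in a temp register first.
    std::cout << "mov temp, #" << operand.immediate << "; add reg, temp\n";
  } else {
    // Encodable immediate (or plain register): emit the instruction directly.
    std::cout << "add with operand as-is\n";
  }
}

int main() {
  EmitAddLike({false, true, 0x123});        // fits the immediate field
  EmitAddLike({false, true, 0x123456789});  // needs a temp register
  EmitAddLike({true, false, 0});            // relocated: scratch register path
}
```

The real macros take the same early exits, but acquire their temporaries through `UseScratchRegisterScope`, which is why each fallback path in the diff re-enters the macro (or the raw assembler) with a plain register operand.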