| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/division-by-constant.h" | 8 #include "src/base/division-by-constant.h" |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| (...skipping 274 matching lines...) |
| 285 dst = rd; | 285 dst = rd; |
| 286 } | 286 } |
| 287 | 287 |
| 288 // Copy the result to the system stack pointer. | 288 // Copy the result to the system stack pointer. |
| 289 if (!dst.Is(rd)) { | 289 if (!dst.Is(rd)) { |
| 290 DCHECK(rd.IsSP()); | 290 DCHECK(rd.IsSP()); |
| 291 Assembler::mov(rd, dst); | 291 Assembler::mov(rd, dst); |
| 292 } | 292 } |
| 293 } | 293 } |
| 294 | 294 |
| 295 void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { |
| 296 DCHECK(is_uint16(imm)); |
| 297 int byte1 = (imm & 0xff); |
| 298 int byte2 = ((imm >> 8) & 0xff); |
| 299 if (byte1 == byte2) { |
| 300 movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1); |
| 301 } else if (byte1 == 0) { |
| 302 movi(vd, byte2, LSL, 8); |
| 303 } else if (byte2 == 0) { |
| 304 movi(vd, byte1); |
| 305 } else if (byte1 == 0xff) { |
| 306 mvni(vd, ~byte2 & 0xff, LSL, 8); |
| 307 } else if (byte2 == 0xff) { |
| 308 mvni(vd, ~byte1 & 0xff); |
| 309 } else { |
| 310 UseScratchRegisterScope temps(this); |
| 311 Register temp = temps.AcquireW(); |
| 312 movz(temp, imm); |
| 313 dup(vd, temp); |
| 314 } |
| 315 } |
| 316 |
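A host-side sketch (plain C++, not part of the CL; the function name is invented) of the selection logic in Movi16bitHelper above: the 16-bit immediate is split into its two bytes and the cheapest NEON form is chosen, falling back to a scratch-register dup only for the general case.

#include <cstdint>
#include <string>

// Mirrors the branch order of Movi16bitHelper; returns which form it picks.
std::string Pick16BitForm(uint16_t imm) {
  int byte1 = imm & 0xff;         // low byte
  int byte2 = (imm >> 8) & 0xff;  // high byte
  if (byte1 == byte2) return "movi on 8-bit lanes";   // e.g. 0x4141
  if (byte1 == 0)     return "movi imm8, LSL #8";     // e.g. 0x2a00
  if (byte2 == 0)     return "movi imm8";             // e.g. 0x002a
  if (byte1 == 0xff)  return "mvni ~imm8, LSL #8";    // e.g. 0x13ff
  if (byte2 == 0xff)  return "mvni ~imm8";            // e.g. 0xff13
  return "movz scratch + dup";                        // general case
}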
| 317 void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { |
| 318 DCHECK(is_uint32(imm)); |
| 319 |
| 320 uint8_t bytes[sizeof(imm)]; |
| 321 memcpy(bytes, &imm, sizeof(imm)); |
| 322 |
| 323 // All bytes are either 0x00 or 0xff. |
| 324 { |
| 325 bool all0orff = true; |
| 326 for (int i = 0; i < 4; ++i) { |
| 327 if ((bytes[i] != 0) && (bytes[i] != 0xff)) { |
| 328 all0orff = false; |
| 329 break; |
| 330 } |
| 331 } |
| 332 |
| 333 if (all0orff == true) { |
| 334 movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm)); |
| 335 return; |
| 336 } |
| 337 } |
| 338 |
| 339 // Of the 4 bytes, only one byte is non-zero. |
| 340 for (int i = 0; i < 4; i++) { |
| 341 if ((imm & (0xff << (i * 8))) == imm) { |
| 342 movi(vd, bytes[i], LSL, i * 8); |
| 343 return; |
| 344 } |
| 345 } |
| 346 |
| 347 // Of the 4 bytes, only one byte is not 0xff. |
| 348 for (int i = 0; i < 4; i++) { |
| 349 uint32_t mask = ~(0xff << (i * 8)); |
| 350 if ((imm & mask) == mask) { |
| 351 mvni(vd, ~bytes[i] & 0xff, LSL, i * 8); |
| 352 return; |
| 353 } |
| 354 } |
| 355 |
| 356 // Immediate is of the form 0x00MMFFFF. |
| 357 if ((imm & 0xff00ffff) == 0x0000ffff) { |
| 358 movi(vd, bytes[2], MSL, 16); |
| 359 return; |
| 360 } |
| 361 |
| 362 // Immediate is of the form 0x0000MMFF. |
| 363 if ((imm & 0xffff00ff) == 0x000000ff) { |
| 364 movi(vd, bytes[1], MSL, 8); |
| 365 return; |
| 366 } |
| 367 |
| 368 // Immediate is of the form 0xFFMM0000. |
| 369 if ((imm & 0xff00ffff) == 0xff000000) { |
| 370 mvni(vd, ~bytes[2] & 0xff, MSL, 16); |
| 371 return; |
| 372 } |
| 373 // Immediate is of the form 0xFFFFMM00. |
| 374 if ((imm & 0xffff00ff) == 0xffff0000) { |
| 375 mvni(vd, ~bytes[1] & 0xff, MSL, 8); |
| 376 return; |
| 377 } |
| 378 |
| 379 // Top and bottom 16-bits are equal. |
| 380 if (((imm >> 16) & 0xffff) == (imm & 0xffff)) { |
| 381 Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff); |
| 382 return; |
| 383 } |
| 384 |
| 385 // Default case. |
| 386 { |
| 387 UseScratchRegisterScope temps(this); |
| 388 Register temp = temps.AcquireW(); |
| 389 Mov(temp, imm); |
| 390 dup(vd, temp); |
| 391 } |
| 392 } |
| 393 |
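Movi32bitHelper tries a sequence of progressively more general NEON immediate forms. A host-side sketch (not V8 code; the function name is invented) of the same fall-through order may help when reasoning about which instruction a given 32-bit constant produces.

#include <cstdint>
#include <string>

std::string Pick32BitForm(uint32_t imm) {
  bool all0orff = true;
  for (int i = 0; i < 4; ++i) {
    uint32_t b = (imm >> (i * 8)) & 0xff;
    if (b != 0 && b != 0xff) { all0orff = false; break; }
  }
  if (all0orff) return "movi, 64-bit byte-mask form";            // e.g. 0x00ff00ff
  for (int i = 0; i < 4; ++i) {                                  // one non-zero byte
    if ((imm & (0xffu << (i * 8))) == imm) return "movi imm8, LSL #(8*i)";
  }
  for (int i = 0; i < 4; ++i) {                                  // one non-0xff byte
    uint32_t mask = ~(0xffu << (i * 8));
    if ((imm & mask) == mask) return "mvni ~imm8, LSL #(8*i)";
  }
  if ((imm & 0xff00ffff) == 0x0000ffff) return "movi imm8, MSL #16";   // 0x00MMFFFF
  if ((imm & 0xffff00ff) == 0x000000ff) return "movi imm8, MSL #8";    // 0x0000MMFF
  if ((imm & 0xff00ffff) == 0xff000000) return "mvni ~imm8, MSL #16";  // 0xFFMM0000
  if ((imm & 0xffff00ff) == 0xffff0000) return "mvni ~imm8, MSL #8";   // 0xFFFFMM00
  if ((imm >> 16) == (imm & 0xffff)) return "Movi16bitHelper on each half";
  return "mov scratch + dup";
}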
| 394 void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { |
| 395 // All bytes are either 0x00 or 0xff. |
| 396 { |
| 397 bool all0orff = true; |
| 398 for (int i = 0; i < 8; ++i) { |
| 399 int byteval = (imm >> (i * 8)) & 0xff; |
| 400 if (byteval != 0 && byteval != 0xff) { |
| 401 all0orff = false; |
| 402 break; |
| 403 } |
| 404 } |
| 405 if (all0orff == true) { |
| 406 movi(vd, imm); |
| 407 return; |
| 408 } |
| 409 } |
| 410 |
| 411 // Top and bottom 32-bits are equal. |
| 412 if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) { |
| 413 Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff); |
| 414 return; |
| 415 } |
| 416 |
| 417 // Default case. |
| 418 { |
| 419 UseScratchRegisterScope temps(this); |
| 420 Register temp = temps.AcquireX(); |
| 421 Mov(temp, imm); |
| 422 if (vd.Is1D()) { |
| 423 mov(vd.D(), 0, temp); |
| 424 } else { |
| 425 dup(vd.V2D(), temp); |
| 426 } |
| 427 } |
| 428 } |
| 429 |
| 430 void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, |
| 431 int shift_amount) { |
| 432 DCHECK(allow_macro_instructions_); |
| 433 if (shift_amount != 0 || shift != LSL) { |
| 434 movi(vd, imm, shift, shift_amount); |
| 435 } else if (vd.Is8B() || vd.Is16B()) { |
| 436 // 8-bit immediate. |
| 437 DCHECK(is_uint8(imm)); |
| 438 movi(vd, imm); |
| 439 } else if (vd.Is4H() || vd.Is8H()) { |
| 440 // 16-bit immediate. |
| 441 Movi16bitHelper(vd, imm); |
| 442 } else if (vd.Is2S() || vd.Is4S()) { |
| 443 // 32-bit immediate. |
| 444 Movi32bitHelper(vd, imm); |
| 445 } else { |
| 446 // 64-bit immediate. |
| 447 Movi64bitHelper(vd, imm); |
| 448 } |
| 449 } |
| 450 |
| 451 void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { |
| 452 // TODO(all): Move 128-bit values in a more efficient way. |
| 453 DCHECK(vd.Is128Bits()); |
| 454 UseScratchRegisterScope temps(this); |
| 455 Movi(vd.V2D(), lo); |
| 456 Register temp = temps.AcquireX(); |
| 457 Mov(temp, hi); |
| 458 Ins(vd.V2D(), 1, temp); |
| 459 } |
| 295 | 460 |
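A hedged usage sketch of the new Movi overloads (not taken from the CL; the register numbers, constants, and the usual `__` shorthand for the MacroAssembler pointer are assumed for illustration). The dispatcher picks a helper from the lane size of the destination:

// Illustrative only.
__ Movi(v0.V16B(), 0x2a);                     // 8-bit lanes: plain movi
__ Movi(v1.V8H(),  0x13ff);                   // 16-bit lanes: Movi16bitHelper (mvni form)
__ Movi(v2.V4S(),  0x002affff);               // 32-bit lanes: movi ..., MSL #16
__ Movi(v3.V2D(),  0x0123456789abcdef);       // 64-bit lanes: scratch register + dup
__ Movi(v4.V2D(),  0xffffffffffffffff, 0x0);  // 128-bit value: move lo, then insert hi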
| 296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { | 461 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { |
| 297 DCHECK(allow_macro_instructions_); | 462 DCHECK(allow_macro_instructions_); |
| 298 | 463 |
| 299 if (operand.NeedsRelocation(this)) { | 464 if (operand.NeedsRelocation(this)) { |
| 300 Ldr(rd, operand.immediate()); | 465 Ldr(rd, operand.immediate()); |
| 301 mvn(rd, rd); | 466 mvn(rd, rd); |
| 302 | 467 |
| 303 } else if (operand.IsImmediate()) { | 468 } else if (operand.IsImmediate()) { |
| 304 // Call the macro assembler for generic immediates. | 469 // Call the macro assembler for generic immediates. |
| (...skipping 251 matching lines...) |
| 556 // The addressing mode is directly supported by the instruction. | 721 // The addressing mode is directly supported by the instruction. |
| 557 AddSubWithCarry(rd, rn, operand, S, op); | 722 AddSubWithCarry(rd, rn, operand, S, op); |
| 558 } | 723 } |
| 559 } | 724 } |
| 560 | 725 |
| 561 | 726 |
| 562 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, | 727 void MacroAssembler::LoadStoreMacro(const CPURegister& rt, |
| 563 const MemOperand& addr, | 728 const MemOperand& addr, |
| 564 LoadStoreOp op) { | 729 LoadStoreOp op) { |
| 565 int64_t offset = addr.offset(); | 730 int64_t offset = addr.offset(); |
| 566 LSDataSize size = CalcLSDataSize(op); | 731 unsigned size = CalcLSDataSize(op); |
| 567 | 732 |
| 568 // Check if an immediate offset fits in the immediate field of the | 733 // Check if an immediate offset fits in the immediate field of the |
| 569 // appropriate instruction. If not, emit two instructions to perform | 734 // appropriate instruction. If not, emit two instructions to perform |
| 570 // the operation. | 735 // the operation. |
| 571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && | 736 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && |
| 572 !IsImmLSUnscaled(offset)) { | 737 !IsImmLSUnscaled(offset)) { |
| 573 // Immediate offset that can't be encoded using unsigned or unscaled | 738 // Immediate offset that can't be encoded using unsigned or unscaled |
| 574 // addressing modes. | 739 // addressing modes. |
| 575 UseScratchRegisterScope temps(this); | 740 UseScratchRegisterScope temps(this); |
| 576 Register temp = temps.AcquireSameSizeAs(addr.base()); | 741 Register temp = temps.AcquireSameSizeAs(addr.base()); |
| (...skipping 14 matching lines...) |
| 591 } | 756 } |
| 592 | 757 |
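The encodability test LoadStoreMacro relies on (IsImmLSScaled / IsImmLSUnscaled) boils down to the two AArch64 immediate-offset fields for loads and stores. A host-side sketch of that test, under the assumption that `size` is the log2 of the access size returned by CalcLSDataSize; the helper names here are invented:

#include <cstdint>

// Unsigned, scaled 12-bit offset field of LDR/STR (immediate).
bool FitsScaledUint12(int64_t offset, unsigned size_log2) {
  if (offset < 0) return false;
  if (offset & ((int64_t{1} << size_log2) - 1)) return false;  // must be aligned
  return (offset >> size_log2) < (int64_t{1} << 12);
}

// Signed, unscaled 9-bit offset field of LDUR/STUR.
bool FitsSignedInt9(int64_t offset) { return offset >= -256 && offset <= 255; }

// LoadStoreMacro falls back to "materialize the offset in a scratch register
// and use a register-offset access" only when both checks fail.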
| 593 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, | 758 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, |
| 594 const CPURegister& rt2, | 759 const CPURegister& rt2, |
| 595 const MemOperand& addr, | 760 const MemOperand& addr, |
| 596 LoadStorePairOp op) { | 761 LoadStorePairOp op) { |
| 597 // TODO(all): Should we support register offset for load-store-pair? | 762 // TODO(all): Should we support register offset for load-store-pair? |
| 598 DCHECK(!addr.IsRegisterOffset()); | 763 DCHECK(!addr.IsRegisterOffset()); |
| 599 | 764 |
| 600 int64_t offset = addr.offset(); | 765 int64_t offset = addr.offset(); |
| 601 LSDataSize size = CalcLSPairDataSize(op); | 766 unsigned size = CalcLSPairDataSize(op); |
| 602 | 767 |
| 603 // Check if the offset fits in the immediate field of the appropriate | 768 // Check if the offset fits in the immediate field of the appropriate |
| 604 // instruction. If not, emit two instructions to perform the operation. | 769 // instruction. If not, emit two instructions to perform the operation. |
| 605 if (IsImmLSPair(offset, size)) { | 770 if (IsImmLSPair(offset, size)) { |
| 606 // Encodable in one load/store pair instruction. | 771 // Encodable in one load/store pair instruction. |
| 607 LoadStorePair(rt, rt2, addr, op); | 772 LoadStorePair(rt, rt2, addr, op); |
| 608 } else { | 773 } else { |
| 609 Register base = addr.base(); | 774 Register base = addr.base(); |
| 610 if (addr.IsImmediateOffset()) { | 775 if (addr.IsImmediateOffset()) { |
| 611 UseScratchRegisterScope temps(this); | 776 UseScratchRegisterScope temps(this); |
| (...skipping 307 matching lines...) |
| 919 DCHECK(dst0.IsValid()); | 1084 DCHECK(dst0.IsValid()); |
| 920 | 1085 |
| 921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid(); | 1086 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid(); |
| 922 int size = dst0.SizeInBytes(); | 1087 int size = dst0.SizeInBytes(); |
| 923 | 1088 |
| 924 PopHelper(4, size, dst0, dst1, dst2, dst3); | 1089 PopHelper(4, size, dst0, dst1, dst2, dst3); |
| 925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7); | 1090 PopHelper(count - 4, size, dst4, dst5, dst6, dst7); |
| 926 PopPostamble(count, size); | 1091 PopPostamble(count, size); |
| 927 } | 1092 } |
| 928 | 1093 |
| 929 | 1094 void MacroAssembler::Push(const Register& src0, const VRegister& src1) { |
| 930 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) { | |
| 931 int size = src0.SizeInBytes() + src1.SizeInBytes(); | 1095 int size = src0.SizeInBytes() + src1.SizeInBytes(); |
| 932 | 1096 |
| 933 PushPreamble(size); | 1097 PushPreamble(size); |
| 934 // Reserve room for src0 and push src1. | 1098 // Reserve room for src0 and push src1. |
| 935 str(src1, MemOperand(StackPointer(), -size, PreIndex)); | 1099 str(src1, MemOperand(StackPointer(), -size, PreIndex)); |
| 936 // Fill the gap with src0. | 1100 // Fill the gap with src0. |
| 937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes())); | 1101 str(src0, MemOperand(StackPointer(), src1.SizeInBytes())); |
| 938 } | 1102 } |
| 939 | 1103 |
| 940 | 1104 |
| (...skipping 440 matching lines...) |
| 1381 Tst(fpcr, RMode_mask); | 1545 Tst(fpcr, RMode_mask); |
| 1382 B(eq, &done); | 1546 B(eq, &done); |
| 1383 | 1547 |
| 1384 Bind(&unexpected_mode); | 1548 Bind(&unexpected_mode); |
| 1385 Abort(kUnexpectedFPCRMode); | 1549 Abort(kUnexpectedFPCRMode); |
| 1386 | 1550 |
| 1387 Bind(&done); | 1551 Bind(&done); |
| 1388 } | 1552 } |
| 1389 } | 1553 } |
| 1390 | 1554 |
| 1391 | 1555 void MacroAssembler::CanonicalizeNaN(const VRegister& dst, |
| 1392 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, | 1556 const VRegister& src) { |
| 1393 const FPRegister& src) { | |
| 1394 AssertFPCRState(); | 1557 AssertFPCRState(); |
| 1395 | 1558 |
| 1396 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which | 1559 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which |
| 1397 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0 | 1560 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0 |
| 1398 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. | 1561 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. |
| 1399 Fsub(dst, src, fp_zero); | 1562 Fsub(dst, src, fp_zero); |
| 1400 } | 1563 } |
| 1401 | 1564 |
| 1402 | 1565 |
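A small host-side illustration (plain C++, not V8 code) of the comment in CanonicalizeNaN above: adding +0.0 loses the sign of -0.0 while subtracting preserves it, and an arithmetic operation on a signalling NaN yields a quiet NaN on IEEE 754 hardware (the quieting itself is a hardware property, so this sketch only shows that the result remains a NaN):

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  double neg_zero = -0.0;
  std::printf("-0.0 + 0.0 = %+f\n", neg_zero + 0.0);  // prints +0.000000
  std::printf("-0.0 - 0.0 = %+f\n", neg_zero - 0.0);  // prints -0.000000
  double snan = std::numeric_limits<double>::signaling_NaN();
  volatile double quieted = snan - 0.0;  // volatile keeps the operation at run time
  std::printf("still a NaN: %d\n", std::isnan(quieted) ? 1 : 0);
  return 0;
}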
| 1403 void MacroAssembler::LoadRoot(CPURegister destination, | 1566 void MacroAssembler::LoadRoot(CPURegister destination, |
| (...skipping 678 matching lines...) |
| 2082 } | 2245 } |
| 2083 | 2246 |
| 2084 AssertNotSmi(object); | 2247 AssertNotSmi(object); |
| 2085 | 2248 |
| 2086 UseScratchRegisterScope temps(this); | 2249 UseScratchRegisterScope temps(this); |
| 2087 Register temp = temps.AcquireX(); | 2250 Register temp = temps.AcquireX(); |
| 2088 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 2251 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2089 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number); | 2252 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number); |
| 2090 } | 2253 } |
| 2091 | 2254 |
| 2092 | 2255 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value, |
| 2093 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, | 2256 VRegister scratch_d, |
| 2094 FPRegister value, | |
| 2095 FPRegister scratch_d, | |
| 2096 Label* on_successful_conversion, | 2257 Label* on_successful_conversion, |
| 2097 Label* on_failed_conversion) { | 2258 Label* on_failed_conversion) { |
| 2098 // Convert to an int and back again, then compare with the original value. | 2259 // Convert to an int and back again, then compare with the original value. |
| 2099 Fcvtzs(as_int, value); | 2260 Fcvtzs(as_int, value); |
| 2100 Scvtf(scratch_d, as_int); | 2261 Scvtf(scratch_d, as_int); |
| 2101 Fcmp(value, scratch_d); | 2262 Fcmp(value, scratch_d); |
| 2102 | 2263 |
| 2103 if (on_successful_conversion) { | 2264 if (on_successful_conversion) { |
| 2104 B(on_successful_conversion, eq); | 2265 B(on_successful_conversion, eq); |
| 2105 } | 2266 } |
| (...skipping 576 matching lines...) |
| 2682 DCHECK(jssp.Is(StackPointer())); | 2843 DCHECK(jssp.Is(StackPointer())); |
| 2683 // Drop the execution stack down to the frame pointer and restore | 2844 // Drop the execution stack down to the frame pointer and restore |
| 2684 // the caller frame pointer and return address. | 2845 // the caller frame pointer and return address. |
| 2685 Mov(jssp, fp); | 2846 Mov(jssp, fp); |
| 2686 AssertStackConsistency(); | 2847 AssertStackConsistency(); |
| 2687 Pop(fp, lr); | 2848 Pop(fp, lr); |
| 2688 } | 2849 } |
| 2689 | 2850 |
| 2690 | 2851 |
| 2691 void MacroAssembler::ExitFramePreserveFPRegs() { | 2852 void MacroAssembler::ExitFramePreserveFPRegs() { |
| 2692 PushCPURegList(kCallerSavedFP); | 2853 PushCPURegList(kCallerSavedV); |
| 2693 } | 2854 } |
| 2694 | 2855 |
| 2695 | 2856 |
| 2696 void MacroAssembler::ExitFrameRestoreFPRegs() { | 2857 void MacroAssembler::ExitFrameRestoreFPRegs() { |
| 2697 // Read the registers from the stack without popping them. The stack pointer | 2858 // Read the registers from the stack without popping them. The stack pointer |
| 2698 // will be reset as part of the unwinding process. | 2859 // will be reset as part of the unwinding process. |
| 2699 CPURegList saved_fp_regs = kCallerSavedFP; | 2860 CPURegList saved_fp_regs = kCallerSavedV; |
| 2700 DCHECK(saved_fp_regs.Count() % 2 == 0); | 2861 DCHECK(saved_fp_regs.Count() % 2 == 0); |
| 2701 | 2862 |
| 2702 int offset = ExitFrameConstants::kLastExitFrameField; | 2863 int offset = ExitFrameConstants::kLastExitFrameField; |
| 2703 while (!saved_fp_regs.IsEmpty()) { | 2864 while (!saved_fp_regs.IsEmpty()) { |
| 2704 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); | 2865 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); |
| 2705 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); | 2866 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); |
| 2706 offset -= 2 * kDRegSize; | 2867 offset -= 2 * kDRegSize; |
| 2707 Ldp(dst1, dst0, MemOperand(fp, offset)); | 2868 Ldp(dst1, dst0, MemOperand(fp, offset)); |
| 2708 } | 2869 } |
| 2709 } | 2870 } |
| (...skipping 457 matching lines...) |
| 3167 NO_ALLOCATION_FLAGS); | 3328 NO_ALLOCATION_FLAGS); |
| 3168 | 3329 |
| 3169 Heap::RootListIndex map_index = mode == MUTABLE | 3330 Heap::RootListIndex map_index = mode == MUTABLE |
| 3170 ? Heap::kMutableHeapNumberMapRootIndex | 3331 ? Heap::kMutableHeapNumberMapRootIndex |
| 3171 : Heap::kHeapNumberMapRootIndex; | 3332 : Heap::kHeapNumberMapRootIndex; |
| 3172 | 3333 |
| 3173 // Prepare the heap number map. | 3334 // Prepare the heap number map. |
| 3174 if (!heap_number_map.IsValid()) { | 3335 if (!heap_number_map.IsValid()) { |
| 3175 // If we have a valid value register, use the same type of register to store | 3336 // If we have a valid value register, use the same type of register to store |
| 3176 // the map so we can use STP to store both in one instruction. | 3337 // the map so we can use STP to store both in one instruction. |
| 3177 if (value.IsValid() && value.IsFPRegister()) { | 3338 if (value.IsValid() && value.IsVRegister()) { |
| 3178 heap_number_map = temps.AcquireD(); | 3339 heap_number_map = temps.AcquireD(); |
| 3179 } else { | 3340 } else { |
| 3180 heap_number_map = scratch1; | 3341 heap_number_map = scratch1; |
| 3181 } | 3342 } |
| 3182 LoadRoot(heap_number_map, map_index); | 3343 LoadRoot(heap_number_map, map_index); |
| 3183 } | 3344 } |
| 3184 if (emit_debug_code()) { | 3345 if (emit_debug_code()) { |
| 3185 Register map; | 3346 Register map; |
| 3186 if (heap_number_map.IsFPRegister()) { | 3347 if (heap_number_map.IsVRegister()) { |
| 3187 map = scratch1; | 3348 map = scratch1; |
| 3188 Fmov(map, DoubleRegister(heap_number_map)); | 3349 Fmov(map, DoubleRegister(heap_number_map)); |
| 3189 } else { | 3350 } else { |
| 3190 map = Register(heap_number_map); | 3351 map = Register(heap_number_map); |
| 3191 } | 3352 } |
| 3192 AssertRegisterIsRoot(map, map_index); | 3353 AssertRegisterIsRoot(map, map_index); |
| 3193 } | 3354 } |
| 3194 | 3355 |
| 3195 // Store the heap number map and the value in the allocated object. | 3356 // Store the heap number map and the value in the allocated object. |
| 3196 if (value.IsSameSizeAndType(heap_number_map)) { | 3357 if (value.IsSameSizeAndType(heap_number_map)) { |
| (...skipping 492 matching lines...) |
| 3689 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 3850 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| 3690 DCHECK(num_unsaved >= 0); | 3851 DCHECK(num_unsaved >= 0); |
| 3691 Claim(num_unsaved); | 3852 Claim(num_unsaved); |
| 3692 PushXRegList(kSafepointSavedRegisters); | 3853 PushXRegList(kSafepointSavedRegisters); |
| 3693 } | 3854 } |
| 3694 | 3855 |
| 3695 | 3856 |
| 3696 void MacroAssembler::PushSafepointRegistersAndDoubles() { | 3857 void MacroAssembler::PushSafepointRegistersAndDoubles() { |
| 3697 PushSafepointRegisters(); | 3858 PushSafepointRegisters(); |
| 3698 PushCPURegList(CPURegList( | 3859 PushCPURegList(CPURegList( |
| 3699 CPURegister::kFPRegister, kDRegSizeInBits, | 3860 CPURegister::kVRegister, kDRegSizeInBits, |
| 3700 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); | 3861 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); |
| 3701 } | 3862 } |
| 3702 | 3863 |
| 3703 | 3864 |
| 3704 void MacroAssembler::PopSafepointRegistersAndDoubles() { | 3865 void MacroAssembler::PopSafepointRegistersAndDoubles() { |
| 3705 PopCPURegList(CPURegList( | 3866 PopCPURegList(CPURegList( |
| 3706 CPURegister::kFPRegister, kDRegSizeInBits, | 3867 CPURegister::kVRegister, kDRegSizeInBits, |
| 3707 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); | 3868 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); |
| 3708 PopSafepointRegisters(); | 3869 PopSafepointRegisters(); |
| 3709 } | 3870 } |
| 3710 | 3871 |
| 3711 | 3872 |
| 3712 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 3873 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
| 3713 // Make sure the safepoint registers list is what we expect. | 3874 // Make sure the safepoint registers list is what we expect. |
| 3714 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); | 3875 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); |
| 3715 | 3876 |
| 3716 // Safepoint registers are stored contiguously on the stack, but not all the | 3877 // Safepoint registers are stored contiguously on the stack, but not all the |
| (...skipping 538 matching lines...) |
| 4255 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; | 4416 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; |
| 4256 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg}; | 4417 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg}; |
| 4257 | 4418 |
| 4258 int arg_count = kPrintfMaxArgCount; | 4419 int arg_count = kPrintfMaxArgCount; |
| 4259 | 4420 |
| 4260 // The PCS varargs registers for printf. Note that x0 is used for the printf | 4421 // The PCS varargs registers for printf. Note that x0 is used for the printf |
| 4261 // format string. | 4422 // format string. |
| 4262 static const CPURegList kPCSVarargs = | 4423 static const CPURegList kPCSVarargs = |
| 4263 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count); | 4424 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count); |
| 4264 static const CPURegList kPCSVarargsFP = | 4425 static const CPURegList kPCSVarargsFP = |
| 4265 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1); | 4426 CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1); |
| 4266 | 4427 |
| 4267 // We can use caller-saved registers as scratch values, except for the | 4428 // We can use caller-saved registers as scratch values, except for the |
| 4268 // arguments and the PCS registers where they might need to go. | 4429 // arguments and the PCS registers where they might need to go. |
| 4269 CPURegList tmp_list = kCallerSaved; | 4430 CPURegList tmp_list = kCallerSaved; |
| 4270 tmp_list.Remove(x0); // Used to pass the format string. | 4431 tmp_list.Remove(x0); // Used to pass the format string. |
| 4271 tmp_list.Remove(kPCSVarargs); | 4432 tmp_list.Remove(kPCSVarargs); |
| 4272 tmp_list.Remove(arg0, arg1, arg2, arg3); | 4433 tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4273 | 4434 |
| 4274 CPURegList fp_tmp_list = kCallerSavedFP; | 4435 CPURegList fp_tmp_list = kCallerSavedV; |
| 4275 fp_tmp_list.Remove(kPCSVarargsFP); | 4436 fp_tmp_list.Remove(kPCSVarargsFP); |
| 4276 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); | 4437 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4277 | 4438 |
| 4278 // Override the MacroAssembler's scratch register list. The lists will be | 4439 // Override the MacroAssembler's scratch register list. The lists will be |
| 4279 // reset automatically at the end of the UseScratchRegisterScope. | 4440 // reset automatically at the end of the UseScratchRegisterScope. |
| 4280 UseScratchRegisterScope temps(this); | 4441 UseScratchRegisterScope temps(this); |
| 4281 TmpList()->set_list(tmp_list.list()); | 4442 TmpList()->set_list(tmp_list.list()); |
| 4282 FPTmpList()->set_list(fp_tmp_list.list()); | 4443 FPTmpList()->set_list(fp_tmp_list.list()); |
| 4283 | 4444 |
| 4284 // Copies of the printf vararg registers that we can pop from. | 4445 // Copies of the printf vararg registers that we can pop from. |
| 4285 CPURegList pcs_varargs = kPCSVarargs; | 4446 CPURegList pcs_varargs = kPCSVarargs; |
| 4286 CPURegList pcs_varargs_fp = kPCSVarargsFP; | 4447 CPURegList pcs_varargs_fp = kPCSVarargsFP; |
| 4287 | 4448 |
| 4288 // Place the arguments. There are lots of clever tricks and optimizations we | 4449 // Place the arguments. There are lots of clever tricks and optimizations we |
| 4289 // could use here, but Printf is a debug tool so instead we just try to keep | 4450 // could use here, but Printf is a debug tool so instead we just try to keep |
| 4290 // it simple: Move each input that isn't already in the right place to a | 4451 // it simple: Move each input that isn't already in the right place to a |
| 4291 // scratch register, then move everything back. | 4452 // scratch register, then move everything back. |
| 4292 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { | 4453 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { |
| 4293 // Work out the proper PCS register for this argument. | 4454 // Work out the proper PCS register for this argument. |
| 4294 if (args[i].IsRegister()) { | 4455 if (args[i].IsRegister()) { |
| 4295 pcs[i] = pcs_varargs.PopLowestIndex().X(); | 4456 pcs[i] = pcs_varargs.PopLowestIndex().X(); |
| 4296 // We might only need a W register here. We need to know the size of the | 4457 // We might only need a W register here. We need to know the size of the |
| 4297 // argument so we can properly encode it for the simulator call. | 4458 // argument so we can properly encode it for the simulator call. |
| 4298 if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); | 4459 if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); |
| 4299 } else if (args[i].IsFPRegister()) { | 4460 } else if (args[i].IsVRegister()) { |
| 4300 // In C, floats are always cast to doubles for varargs calls. | 4461 // In C, floats are always cast to doubles for varargs calls. |
| 4301 pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); | 4462 pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); |
| 4302 } else { | 4463 } else { |
| 4303 DCHECK(args[i].IsNone()); | 4464 DCHECK(args[i].IsNone()); |
| 4304 arg_count = i; | 4465 arg_count = i; |
| 4305 break; | 4466 break; |
| 4306 } | 4467 } |
| 4307 | 4468 |
| 4308 // If the argument is already in the right place, leave it where it is. | 4469 // If the argument is already in the right place, leave it where it is. |
| 4309 if (args[i].Aliases(pcs[i])) continue; | 4470 if (args[i].Aliases(pcs[i])) continue; |
| 4310 | 4471 |
| 4311 // Otherwise, if the argument is in a PCS argument register, allocate an | 4472 // Otherwise, if the argument is in a PCS argument register, allocate an |
| 4312 // appropriate scratch register and then move it out of the way. | 4473 // appropriate scratch register and then move it out of the way. |
| 4313 if (kPCSVarargs.IncludesAliasOf(args[i]) || | 4474 if (kPCSVarargs.IncludesAliasOf(args[i]) || |
| 4314 kPCSVarargsFP.IncludesAliasOf(args[i])) { | 4475 kPCSVarargsFP.IncludesAliasOf(args[i])) { |
| 4315 if (args[i].IsRegister()) { | 4476 if (args[i].IsRegister()) { |
| 4316 Register old_arg = Register(args[i]); | 4477 Register old_arg = Register(args[i]); |
| 4317 Register new_arg = temps.AcquireSameSizeAs(old_arg); | 4478 Register new_arg = temps.AcquireSameSizeAs(old_arg); |
| 4318 Mov(new_arg, old_arg); | 4479 Mov(new_arg, old_arg); |
| 4319 args[i] = new_arg; | 4480 args[i] = new_arg; |
| 4320 } else { | 4481 } else { |
| 4321 FPRegister old_arg = FPRegister(args[i]); | 4482 VRegister old_arg = VRegister(args[i]); |
| 4322 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); | 4483 VRegister new_arg = temps.AcquireSameSizeAs(old_arg); |
| 4323 Fmov(new_arg, old_arg); | 4484 Fmov(new_arg, old_arg); |
| 4324 args[i] = new_arg; | 4485 args[i] = new_arg; |
| 4325 } | 4486 } |
| 4326 } | 4487 } |
| 4327 } | 4488 } |
| 4328 | 4489 |
| 4329 // Do a second pass to move values into their final positions and perform any | 4490 // Do a second pass to move values into their final positions and perform any |
| 4330 // conversions that may be required. | 4491 // conversions that may be required. |
| 4331 for (int i = 0; i < arg_count; i++) { | 4492 for (int i = 0; i < arg_count; i++) { |
| 4332 DCHECK(pcs[i].type() == args[i].type()); | 4493 DCHECK(pcs[i].type() == args[i].type()); |
| 4333 if (pcs[i].IsRegister()) { | 4494 if (pcs[i].IsRegister()) { |
| 4334 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); | 4495 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); |
| 4335 } else { | 4496 } else { |
| 4336 DCHECK(pcs[i].IsFPRegister()); | 4497 DCHECK(pcs[i].IsVRegister()); |
| 4337 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { | 4498 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { |
| 4338 Fmov(FPRegister(pcs[i]), FPRegister(args[i])); | 4499 Fmov(VRegister(pcs[i]), VRegister(args[i])); |
| 4339 } else { | 4500 } else { |
| 4340 Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); | 4501 Fcvt(VRegister(pcs[i]), VRegister(args[i])); |
| 4341 } | 4502 } |
| 4342 } | 4503 } |
| 4343 } | 4504 } |
| 4344 | 4505 |
| 4345 // Load the format string into x0, as per the procedure-call standard. | 4506 // Load the format string into x0, as per the procedure-call standard. |
| 4346 // | 4507 // |
| 4347 // To make the code as portable as possible, the format string is encoded | 4508 // To make the code as portable as possible, the format string is encoded |
| 4348 // directly in the instruction stream. It might be cleaner to encode it in a | 4509 // directly in the instruction stream. It might be cleaner to encode it in a |
| 4349 // literal pool, but since Printf is usually used for debugging, it is | 4510 // literal pool, but since Printf is usually used for debugging, it is |
| 4350 // beneficial for it to be minimally dependent on other features. | 4511 // beneficial for it to be minimally dependent on other features. |
| (...skipping 67 matching lines...) |
| 4418 // available as scratch registers until we've preserved them. | 4579 // available as scratch registers until we've preserved them. |
| 4419 RegList old_tmp_list = TmpList()->list(); | 4580 RegList old_tmp_list = TmpList()->list(); |
| 4420 RegList old_fp_tmp_list = FPTmpList()->list(); | 4581 RegList old_fp_tmp_list = FPTmpList()->list(); |
| 4421 TmpList()->set_list(0); | 4582 TmpList()->set_list(0); |
| 4422 FPTmpList()->set_list(0); | 4583 FPTmpList()->set_list(0); |
| 4423 | 4584 |
| 4424 // Preserve all caller-saved registers as well as NZCV. | 4585 // Preserve all caller-saved registers as well as NZCV. |
| 4425 // If csp is the stack pointer, PushCPURegList asserts that the size of each | 4586 // If csp is the stack pointer, PushCPURegList asserts that the size of each |
| 4426 // list is a multiple of 16 bytes. | 4587 // list is a multiple of 16 bytes. |
| 4427 PushCPURegList(kCallerSaved); | 4588 PushCPURegList(kCallerSaved); |
| 4428 PushCPURegList(kCallerSavedFP); | 4589 PushCPURegList(kCallerSavedV); |
| 4429 | 4590 |
| 4430 // We can use caller-saved registers as scratch values (except for argN). | 4591 // We can use caller-saved registers as scratch values (except for argN). |
| 4431 CPURegList tmp_list = kCallerSaved; | 4592 CPURegList tmp_list = kCallerSaved; |
| 4432 CPURegList fp_tmp_list = kCallerSavedFP; | 4593 CPURegList fp_tmp_list = kCallerSavedV; |
| 4433 tmp_list.Remove(arg0, arg1, arg2, arg3); | 4594 tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4434 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); | 4595 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); |
| 4435 TmpList()->set_list(tmp_list.list()); | 4596 TmpList()->set_list(tmp_list.list()); |
| 4436 FPTmpList()->set_list(fp_tmp_list.list()); | 4597 FPTmpList()->set_list(fp_tmp_list.list()); |
| 4437 | 4598 |
| 4438 { UseScratchRegisterScope temps(this); | 4599 { UseScratchRegisterScope temps(this); |
| 4439 // If any of the arguments are the current stack pointer, allocate a new | 4600 // If any of the arguments are the current stack pointer, allocate a new |
| 4440 // register for them, and adjust the value to compensate for pushing the | 4601 // register for them, and adjust the value to compensate for pushing the |
| 4441 // caller-saved registers. | 4602 // caller-saved registers. |
| 4442 bool arg0_sp = StackPointer().Aliases(arg0); | 4603 bool arg0_sp = StackPointer().Aliases(arg0); |
| 4443 bool arg1_sp = StackPointer().Aliases(arg1); | 4604 bool arg1_sp = StackPointer().Aliases(arg1); |
| 4444 bool arg2_sp = StackPointer().Aliases(arg2); | 4605 bool arg2_sp = StackPointer().Aliases(arg2); |
| 4445 bool arg3_sp = StackPointer().Aliases(arg3); | 4606 bool arg3_sp = StackPointer().Aliases(arg3); |
| 4446 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { | 4607 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { |
| 4447 // Allocate a register to hold the original stack pointer value, to pass | 4608 // Allocate a register to hold the original stack pointer value, to pass |
| 4448 // to PrintfNoPreserve as an argument. | 4609 // to PrintfNoPreserve as an argument. |
| 4449 Register arg_sp = temps.AcquireX(); | 4610 Register arg_sp = temps.AcquireX(); |
| 4450 Add(arg_sp, StackPointer(), | 4611 Add(arg_sp, StackPointer(), |
| 4451 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes()); | 4612 kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes()); |
| 4452 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits()); | 4613 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits()); |
| 4453 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits()); | 4614 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits()); |
| 4454 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits()); | 4615 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits()); |
| 4455 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits()); | 4616 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits()); |
| 4456 } | 4617 } |
| 4457 | 4618 |
| 4458 // Preserve NZCV. | 4619 // Preserve NZCV. |
| 4459 { UseScratchRegisterScope temps(this); | 4620 { UseScratchRegisterScope temps(this); |
| 4460 Register tmp = temps.AcquireX(); | 4621 Register tmp = temps.AcquireX(); |
| 4461 Mrs(tmp, NZCV); | 4622 Mrs(tmp, NZCV); |
| 4462 Push(tmp, xzr); | 4623 Push(tmp, xzr); |
| 4463 } | 4624 } |
| 4464 | 4625 |
| 4465 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); | 4626 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); |
| 4466 | 4627 |
| 4467 // Restore NZCV. | 4628 // Restore NZCV. |
| 4468 { UseScratchRegisterScope temps(this); | 4629 { UseScratchRegisterScope temps(this); |
| 4469 Register tmp = temps.AcquireX(); | 4630 Register tmp = temps.AcquireX(); |
| 4470 Pop(xzr, tmp); | 4631 Pop(xzr, tmp); |
| 4471 Msr(NZCV, tmp); | 4632 Msr(NZCV, tmp); |
| 4472 } | 4633 } |
| 4473 } | 4634 } |
| 4474 | 4635 |
| 4475 PopCPURegList(kCallerSavedFP); | 4636 PopCPURegList(kCallerSavedV); |
| 4476 PopCPURegList(kCallerSaved); | 4637 PopCPURegList(kCallerSaved); |
| 4477 | 4638 |
| 4478 TmpList()->set_list(old_tmp_list); | 4639 TmpList()->set_list(old_tmp_list); |
| 4479 FPTmpList()->set_list(old_fp_tmp_list); | 4640 FPTmpList()->set_list(old_fp_tmp_list); |
| 4480 } | 4641 } |
| 4481 | 4642 |
| 4482 | 4643 |
| 4483 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { | 4644 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { |
| 4484 // TODO(jbramley): Other architectures use the internal memcpy to copy the | 4645 // TODO(jbramley): Other architectures use the internal memcpy to copy the |
| 4485 // sequence. If this is a performance bottleneck, we should consider caching | 4646 // sequence. If this is a performance bottleneck, we should consider caching |
| (...skipping 93 matching lines...) |
| 4579 available_->set_list(old_available_); | 4740 available_->set_list(old_available_); |
| 4580 availablefp_->set_list(old_availablefp_); | 4741 availablefp_->set_list(old_availablefp_); |
| 4581 } | 4742 } |
| 4582 | 4743 |
| 4583 | 4744 |
| 4584 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { | 4745 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { |
| 4585 int code = AcquireNextAvailable(available_).code(); | 4746 int code = AcquireNextAvailable(available_).code(); |
| 4586 return Register::Create(code, reg.SizeInBits()); | 4747 return Register::Create(code, reg.SizeInBits()); |
| 4587 } | 4748 } |
| 4588 | 4749 |
| 4589 | 4750 VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) { |
| 4590 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { | |
| 4591 int code = AcquireNextAvailable(availablefp_).code(); | 4751 int code = AcquireNextAvailable(availablefp_).code(); |
| 4592 return FPRegister::Create(code, reg.SizeInBits()); | 4752 return VRegister::Create(code, reg.SizeInBits()); |
| 4593 } | 4753 } |
| 4594 | 4754 |
| 4595 | 4755 |
| 4596 CPURegister UseScratchRegisterScope::AcquireNextAvailable( | 4756 CPURegister UseScratchRegisterScope::AcquireNextAvailable( |
| 4597 CPURegList* available) { | 4757 CPURegList* available) { |
| 4598 CHECK(!available->IsEmpty()); | 4758 CHECK(!available->IsEmpty()); |
| 4599 CPURegister result = available->PopLowestIndex(); | 4759 CPURegister result = available->PopLowestIndex(); |
| 4600 DCHECK(!AreAliased(result, xzr, csp)); | 4760 DCHECK(!AreAliased(result, xzr, csp)); |
| 4601 return result; | 4761 return result; |
| 4602 } | 4762 } |
| (...skipping 54 matching lines...) |
| 4657 } | 4817 } |
| 4658 | 4818 |
| 4659 | 4819 |
| 4660 #undef __ | 4820 #undef __ |
| 4661 | 4821 |
| 4662 | 4822 |
| 4663 } // namespace internal | 4823 } // namespace internal |
| 4664 } // namespace v8 | 4824 } // namespace v8 |
| 4665 | 4825 |
| 4666 #endif // V8_TARGET_ARCH_ARM64 | 4826 #endif // V8_TARGET_ARCH_ARM64 |