Chromium Code Reviews

Unified diff: src/arm64/macro-assembler-arm64.cc

Issue 2922173004: [arm64] Fix pre-shifted immediate generation involving csp. (Closed)
Patch Set: Add extra test, fix mask generation bug. Created 3 years, 6 months ago
Other files in this change: src/arm64/macro-assembler-arm64.h, test/cctest/test-assembler-arm64.cc
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_ARM64

 #include "src/arm64/frames-arm64.h"
 #include "src/assembler.h"
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
(...skipping 111 matching lines...)
@@ -122,21 +122,26 @@
       }
     }

     unsigned n, imm_s, imm_r;
     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
       // Immediate can be encoded in the instruction.
       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
     } else {
       // Immediate can't be encoded: synthesize using move immediate.
       Register temp = temps.AcquireSameSizeAs(rn);
-      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
+
+      // If the left-hand input is the stack pointer, we can't pre-shift the
+      // immediate, as the encoding won't allow the subsequent post shift.
+      PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
+
       if (rd.Is(csp)) {
         // If rd is the stack pointer we cannot use it as the destination
         // register so we use the temp register as an intermediate again.
         Logical(temp, rn, imm_operand, op);
         Mov(csp, temp);
         AssertStackConsistency();
       } else {
         Logical(rd, rn, imm_operand, op);
       }
     }
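
The new mode argument exists because the fallback path relies on a pre-shift/post-shift pair: a small value is materialised in temp and the following logical instruction applies an LSL post-shift, which is only available through the shifted-register form, and that form cannot name the stack pointer. Below is a minimal standalone sketch (not part of this patch; the constant is just an illustrative value) of the identity the pre-shift relies on:

// Standalone sketch, not V8 code: the pre-shift splits an immediate into a
// small value plus an LSL post-shift, relying on imm == (imm >> tz) << tz.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t imm = UINT64_C(0x123400000);   // not a movz/movn/orr immediate
  int shift_low = __builtin_ctzll(imm);   // 22 trailing zero bits
  uint64_t imm_low = imm >> shift_low;    // 0x48d, fits a 16-bit movz
  assert((imm_low << shift_low) == imm);  // the LSL post-shift restores imm
  // When rn is csp the shifted-register form is unavailable, so LogicalMacro
  // now passes kNoShift and the immediate is materialised without a shift.
  return 0;
}
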
(...skipping 452 matching lines...)
@@ -595,46 +600,52 @@
     movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
     return true;
   } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
     // Immediate can be represented in a logical orr instruction.
     LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
     return true;
   }
   return false;
 }

-
 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
-                                                  int64_t imm) {
+                                                  int64_t imm,
+                                                  PreShiftImmMode mode) {
   int reg_size = dst.SizeInBits();
-
   // Encode the immediate in a single move instruction, if possible.
   if (TryOneInstrMoveImmediate(dst, imm)) {
     // The move was successful; nothing to do here.
   } else {
     // Pre-shift the immediate to the least-significant bits of the register.
     int shift_low = CountTrailingZeros(imm, reg_size);
+    if (mode == kLimitShiftForSP) {
+      // When applied to the stack pointer, the subsequent arithmetic operation
+      // can use the extend form to shift left by a maximum of four bits. Right
+      // shifts are not allowed, so we filter them out later before the new
+      // immediate is tested.
+      shift_low = std::min(shift_low, 4);
+    }
     int64_t imm_low = imm >> shift_low;

     // Pre-shift the immediate to the most-significant bits of the register. We
     // insert set bits in the least-significant bits, as this creates a
     // different immediate that may be encodable using movn or orr-immediate.
     // If this new immediate is encodable, the set bits will be eliminated by
     // the post shift on the following instruction.
     int shift_high = CountLeadingZeros(imm, reg_size);
-    int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
+    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);

-    if (TryOneInstrMoveImmediate(dst, imm_low)) {
+    if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
       // The new immediate has been moved into the destination's low bits:
       // return a new leftward-shifting operand.
       return Operand(dst, LSL, shift_low);
-    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+    } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
       // The new immediate has been moved into the destination's high bits:
       // return a new rightward-shifting operand.
       return Operand(dst, LSR, shift_high);
     } else {
       // Use the generic move operation to set up the immediate.
       Mov(dst, imm);
     }
   }
   return Operand(dst);
 }
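
The INT64_C change above is the "fix mask generation bug" part of this patch set: the fill mask for the high pre-shift has to be built in 64-bit arithmetic, because with a plain 1 the shift is performed on a 32-bit int and is undefined once shift_high reaches 32. Below is a standalone sketch of just that mask arithmetic (not V8 code; the constant is an arbitrary illustrative value):

// Standalone sketch, not V8 code: building the fill mask for imm_high.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t imm = UINT64_C(0x123400);               // 43 leading zero bits
  int shift_high = __builtin_clzll(imm);           // 43
  // Correct 64-bit mask: 0x000007ffffffffff. The buggy form
  // (1 << shift_high) shifts a 32-bit int by 43, which is undefined
  // behaviour and wraps to 1 << 11 on common compilers.
  uint64_t mask = (UINT64_C(1) << shift_high) - 1;
  uint64_t imm_high = (imm << shift_high) | mask;  // candidate for movn/orr
  // The later LSR #43 post-shift discards the filled bits again, so
  // imm_high >> shift_high == imm still holds.
  std::printf("%016llx\n", (unsigned long long)imm_high);  // 91a007ffffffffff
  return 0;
}
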
(...skipping 15 matching lines...)
@@ -656,22 +667,35 @@
     Register temp = temps.AcquireX();
     Ldr(temp, operand.immediate());
     AddSubMacro(rd, rn, temp, S, op);
   } else if ((operand.IsImmediate() &&
               !IsImmAddSub(operand.ImmediateValue())) ||
              (rn.IsZero() && !operand.IsShiftedRegister()) ||
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
     if (operand.IsImmediate()) {
+      PreShiftImmMode mode = kAnyShift;
+
+      // If the destination or source register is the stack pointer, we can
+      // only pre-shift the immediate right by values supported in the add/sub
+      // extend encoding.
+      if (rd.Is(csp)) {
+        // If the destination is SP and flags will be set, we can't pre-shift
+        // the immediate at all.
+        mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+      } else if (rn.Is(csp)) {
+        mode = kLimitShiftForSP;
+      }
+
       Operand imm_operand =
-          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+          MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
       AddSub(rd, rn, imm_operand, S, op);
     } else {
       Mov(temp, operand);
       AddSub(rd, rn, temp, S, op);
     }
   } else {
     AddSub(rd, rn, operand, S, op);
   }
 }

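Restated outside the macro assembler, the mode selection added to AddSubMacro is a three-way decision. The helper below is a hypothetical standalone restatement (the enum values come from the patch; the SelectMode function itself is not part of V8) that mirrors the branches in the hunk above:

// Hypothetical restatement, not V8 code: choose the pre-shift mode for an
// add/sub whose immediate cannot be encoded directly.
enum PreShiftImmMode { kNoShift, kLimitShiftForSP, kAnyShift };

PreShiftImmMode SelectMode(bool rd_is_csp, bool rn_is_csp, bool sets_flags) {
  if (rd_is_csp) {
    // Destination is the stack pointer: if flags are also being set, the
    // immediate must not be pre-shifted at all; otherwise the post-shift is
    // limited to what the add/sub extend encoding allows (LSL #0 to #4).
    return sets_flags ? kNoShift : kLimitShiftForSP;
  }
  if (rn_is_csp) {
    // First source is the stack pointer: the same extend-encoding limit
    // applies to the post-shift.
    return kLimitShiftForSP;
  }
  return kAnyShift;  // No stack pointer involved: any pre-shift is allowed.
}

int main() {
  // Example: a flag-setting add targeting csp with a large immediate would
  // get kNoShift under this selection.
  return SelectMode(true, false, true) == kNoShift ? 0 : 1;
}
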
(...skipping 4091 matching lines...)
 }


 #undef __


 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64