// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_A64)

#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {


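// Rotate the least-significant 'width' bits of 'value' right by 'rotate'
// bits; the rotate amount is reduced modulo 64.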
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  ASSERT(width <= 64);
  rotate &= 63;
  if (rotate == 0) {
    // A rotate of zero is the identity; returning early also avoids an
    // undefined left shift by 'width' (which may be 64) below.
    return value;
  }
  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
         (value >> rotate);
}


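// Replicate the low 'width' bits of 'value' across a register of
// 'reg_size' bits. For example (worked through by hand), width == 8 and
// value == 0xab give 0xabababab for a 32-bit register.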
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((1UL << width) - 1UL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, when the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
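  // For example (worked through by hand): N = 0, imm_s = 0b110101 and
  // imm_r = 0b000001 select the size-8 row, with S = 5 and R = 1. The
  // pattern is 0b00111111, rotated right by one bit to 0b10011111 (0x9f),
  // then repeated across the register: 0x9f9f9f9f for a W register.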

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1UL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  // ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
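  //
  // For example (worked through by hand): ImmFP() == 0x70 (0b01110000)
  // expands to result == 0x3f800000, which is 1.0f.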
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  // ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
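  //
  // For example (worked through by hand): ImmFP() == 0x70 (0b01110000)
  // expands to result == 0x3ff0000000000000, which is 1.0.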
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


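// Pair accesses of X or D registers transfer a double word per register;
// every other load/store pair variant handled here transfers a word.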
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


ptrdiff_t Instruction::ImmPCOffset() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    ASSERT(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


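// Note: arithmetic on Instruction* (here and in the setters below) advances
// byte-by-byte, since Instruction declares no data members and therefore has
// a sizeof of one.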
Instruction* Instruction::ImmPCOffsetTarget() {
  return this + ImmPCOffset();
}


inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm = Assembler::ImmPCRelAddress(target - this);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  ASSERT(((source - this) & 3) == 0);
  int offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
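  // For example, "movz xzr, #0x1234" carries the 16-bit payload 0x1234,
  // which InlineData() below recovers via ImmMoveWide().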
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  ASSERT(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64