// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#define A64_DEFINE_FP_STATICS

#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}

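// Rotate the low 'width' bits of 'value' right by 'rotate' bits; bits that
// fall off the bottom re-enter at bit (width - 1). For example,
// RotateRight(0b0011, 1, 4) == 0b1001.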
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
         (value >> rotate);
}

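// Repeat the low 'width' bits of 'value' across a register of 'reg_size'
// bits. For example, RepeatBitsAcrossReg(32, 0x81, 8) == 0x81818181.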
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((1UL << width) - 1UL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, when the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1UL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  // ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
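  // For example, ImmFP() == 0x70 (a=0, b=1, cdefgh=110000) expands to
  // 0x3F800000, i.e. 1.0f.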
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  // ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
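  // For example, ImmFP() == 0x70 expands to 0x3FF0000000000000, i.e. 1.0.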
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}

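// Each register in a load/store pair transfers a doubleword for X-register
// and D-register pairs, and a word otherwise.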
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


ptrdiff_t Instruction::ImmPCOffset() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    ASSERT(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


Instruction* Instruction::ImmPCOffsetTarget() {
  return this + ImmPCOffset();
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  int offset = target - this;
  return IsValidImmPCOffset(BranchType(), offset);
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm = Assembler::ImmPCRelAddress(target - this);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  ASSERT(((source - this) & 3) == 0);
  int offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
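  // For example, "movz xzr, #0x1234" (with a zero shift) is treated as
  // inline data carrying the 16-bit payload 0x1234.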
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  ASSERT(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64