// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#define A64_DEFINE_FP_STATICS

#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"

namespace v8 {
namespace internal {

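// A load is either a load/store-pair encoding with the L bit set, or one of
// the single-register load opcodes listed below.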
bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}


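// Rotate 'value' right by 'rotate' bits within a field of 'width' bits.
// For example (illustrative): RotateRight(0x7, 1, 8) == 0x83.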
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  ASSERT(width <= 64);
  rotate &= 63;
  if (rotate == 0) {
    // A rotation by zero is a no-op; returning early also avoids an
    // out-of-range shift by 'width' below.
    return value;
  }
  // 64-bit literals keep the shifts well defined even where 'unsigned long'
  // is only 32 bits wide.
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
         (value >> rotate);
}


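// Replicate the low 'width' bits of 'value' across a register of 'reg_size'
// bits. For example (illustrative): RepeatBitsAcrossReg(32, 0xab, 8) gives
// 0xabababab.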
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  uint64_t result = value & ((1ULL << width) - 1ULL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32- or 64-bit value, depending on destination register width.
  //
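  // For example (illustrative): N=0, imms=0b110010 and immr=0b000001 select
  // an 8-bit pattern with the three least significant bits set (0b00000111),
  // rotate it right by one (0b10000011), and replicate it across a W
  // register, giving 0x83838383.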

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1ULL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
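  // For example (illustrative): ImmFP() == 0x70 (abcdefgh == 0111.0000)
  // expands to 0x3f800000, which is 1.0f.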
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
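  // For example (illustrative): ImmFP() == 0x70 expands to
  // 0x3ff0000000000000, which is 1.0.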
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


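// Find the PC offset encoded in this instruction: an ADR, an immediate
// branch, or a PC-relative literal load.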
ptrdiff_t Instruction::ImmPCOffset() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    ASSERT(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target));

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
  ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
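  // For example (illustrative): "movz xzr, #0x1234" carries the 16-bit
  // payload 0x1234.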
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  ASSERT(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64