Chromium Code Reviews

Side by Side Diff: src/arm/assembler-thumb32.cc

Issue 24182004: Thumb2 Backend: 32-bit instruction encoding helper methods
Base URL: HEAD^
Patch Set: Created 7 years, 2 months ago
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "arm/assembler-arm-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

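// Note on the bit constants used below: a 32-bit Thumb2 Instr is handled as
// high halfword : low halfword, so BHn is taken to be bit (n + 16) of the
// 32-bit encoding (hw1 bit n) and Bn bit n of the low halfword (hw2), per
// the definitions assumed in constants-arm.h. For example,
// BH15 | BH14 | BH13 | BH12 sets hw1<15:12> = 1111.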
Instr Assembler::thumb32_mode1(ThumbOpcode32Mode1 op, SBit s) {
  return (BH15 | BH14 | BH13 | BH12 | op*BH5 | s);
}


Instr Assembler::thumb32_mode3(ThumbOpcode32Mode3 op) {
  return (BH15 | BH14 | BH13 | BH12 | BH9 | op*BH4);
}


Instr Assembler::thumb32_mode4(ThumbOpcode32Mode4 op) {
  return (BH15 | BH14 | BH13 | BH12 | B15 | op*B12);
}


Instr Assembler::thumb32_mode5() {
  return (BH15 | BH14 | BH13 | BH11);
}


Instr Assembler::thumb32_mode6(ThumbOpcode32Mode6 op1,
                               ThumbOpcode32Mode6 op2) {
  return (BH15 | BH14 | BH13 | BH11 | op1*BH7 | BH6 | op2*BH4);
}


Instr Assembler::thumb32_mode7(ThumbOpcode32Mode7 op) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH6 | BH4);
}


Instr Assembler::thumb32_mode8(ThumbOpcode32Mode8 op) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH5 | BH4);
}


Instr Assembler::thumb32_mode9(ThumbOpcode32Mode9 op) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH4);
}


Instr Assembler::thumb32_mode10(ThumbOpcode32Mode10 op) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH5);
}


Instr Assembler::thumb32_mode11(ThumbOpcode32Mode11 op, SBit s) {
  return (BH15 | BH14 | BH13 | BH11 | BH9 | op*BH5 | s);
}


Instr Assembler::thumb32_mode12(ThumbOpcode32Mode12 op1) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | op1*BH4);
}


Instr Assembler::thumb32_mode16(ThumbOpcode32Mode16 op2) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | BH8 | op2*B4);
}


Instr Assembler::thumb32_mode17(ThumbOpcode32Mode17 op1) {
  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | BH8 | BH7 | op1*BH4);
}


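// Background for thumb_expand_imm below (informal summary of the ARM ARM
// ThumbExpandImm rules): i:imm3:imm8 encodes either the byte abcdefgh
// replicated in one of four patterns (i:imm3 = 0000..0011, handled first)
// or an 8-bit value 1bcdefgh rotated right by i:imm3:a. For example,
// 0x00AB00AB is encodable with i:imm3 = 0001 and imm8 = 0xAB, while
// 0xFF000000 is 0xFF rotated right by 8, i.e. i:imm3:a = 01000 with
// imm8<6:0> = 0x7F.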
// Used by 32-bit mode 1 instructions.
bool Assembler::thumb_expand_imm(uint32_t imm32,
                                 uint32_t* i,
                                 uint32_t* imm3,
                                 uint32_t* imm8) {
  // 00000000 00000000 00000000 abcdefgh
  if ((imm32 & 0xFFFFFF00) == 0) {
    *imm8 = imm32;  // abcdefgh, not 1bcdefgh.
    *i = 0;         // i:imm3:a == '0000x'
    *imm3 = 0;
    return true;
  }
  // 00000000 abcdefgh 00000000 abcdefgh
  if (((0xff00ff00 & imm32) == 0) &&
      (((0xff0000 & imm32) >> 16) == (0xff & imm32))) {
    *i = 0;
    *imm3 = 1;  // i:imm3:a == '0001x'
    *imm8 = imm32 & 0xff;
    return true;
  }
  // abcdefgh 00000000 abcdefgh 00000000
  if (((0x00ff00ff & imm32) == 0) &&
      (((0xff000000 & imm32) >> 16) == (0xff00 & imm32))) {
    *i = 0;
    *imm3 = 2;  // i:imm3:a == '0010x'
    *imm8 = (imm32 & 0xff00) >> 8;
    return true;
  }
  // abcdefgh abcdefgh abcdefgh abcdefgh
  if ((((0xffff0000 & imm32) >> 16) == (0xffff & imm32)) &&
      (((0xff00 & imm32) >> 8) == (0xff & imm32))) {
    *i = 0;
    *imm3 = 3;  // i:imm3:a == '0011x'
    *imm8 = imm32 & 0xff;
    return true;
  }

  // <0's> (a=1)bcdefgh <0's>
  // Look for the lowest bit set first, to fail faster (the most common case).
  if ((imm32 & 0xFFF80000) && (imm32 & 0xFFF))
    return false;  // Short circuit - ON bits too far apart to fit.

  int lowestbyteOn = 0;
  for (lowestbyteOn = 0; lowestbyteOn < 4; lowestbyteOn++)
    if (imm32 & (0xff << lowestbyteOn*8))
      break;  // lowestbyteOn is the lowest byte with any bit on.

  // Because case '0000x' is handled above, the value is not 0,
  // so lowestbyteOn must be less than 4.
  int bitnum = 0;  // Find the lowest bit ON.
  for (bitnum = lowestbyteOn*8; bitnum < (lowestbyteOn+1)*8; bitnum++)
    if (imm32 & (1 << bitnum))
      break;  // This is the bottom bit on.

  // bitnum must be < 32.
  if ((bitnum < (lowestbyteOn+1)*8) &&
      ((imm32 & ~(0xff << bitnum)) == 0)) {  // Then it fits this pattern.
    // Now we need the top bit ON, which becomes 'a' in the (a=1)bcdefgh
    // pattern.
    int top_bit_on = (bitnum + 7 < 32) ? bitnum + 7 : 31;
    while ((imm32 & (1 << top_bit_on)) == 0)
      top_bit_on--;

    // i:imm3:a goes from 01000 to 11111, so 39 - i:imm3:a goes from 31 to 8.
    // 39 - i:imm3:a = top_bit_on;
    int i_imm3_a = 39 - top_bit_on;
    *i = (i_imm3_a >> 4) & 0x1;
    *imm3 = (i_imm3_a >> 1) & 0x7;

    *imm8 = imm32 >> (top_bit_on - 7);
    if ((i_imm3_a & 0x1) == 0)
      *imm8 = *imm8 & 0x7f;  // 1bcdefgh

    return true;
  }

  *i = *imm3 = *imm8 = 0;
  return false;
}


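// The J1/J2 bits below follow the ARM ARM T4 branch encoding, where
// I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S); solving for J1 and J2 gives
// J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S), as computed here.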
Instr Assembler::thumb32_sign_extend_imm24(int imm) {
  uint32_t imm11 = imm & 0x7ff;
  uint32_t imm10 = (imm >> 11) & 0x3ff;
  uint32_t i2 = (imm >> 21) & 1;
  uint32_t i1 = (imm >> 22) & 1;
  uint32_t s = (imm >> 23) & 1;
  uint32_t j1 = (~(i1 ^ s)) & 1;
  uint32_t j2 = (~(i2 ^ s)) & 1;
  return (s*BH10 | imm10*BH0 | j1*B13 | j2*B11 | imm11);
}


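// With rn == pc, thumb32_2reg_zero_extend_imm12 encodes the literal form:
// a negative offset is folded into the U (add/subtract) bit. For any other
// base register the offset must already be an unsigned 12-bit value, as
// the ASSERT enforces.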
Instr Assembler::thumb32_2reg_zero_extend_imm12(Register rd,
                                                const MemOperand& x) {
  ASSERT(!x.rm_.is_valid());  // is Immediate.
  uint32_t offset = x.offset_;
  uint32_t sign = U;
  if (x.rn_.code() == 15) {
    if (x.offset_ < 0) {
      sign = 0;
      offset = -x.offset_;
    }
  }
  ASSERT(is_uint12(offset));
  return (sign | x.rn_.code()*BH0 | rd.code()*B12 | offset);
}


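// Negative offsets are folded into the U (up) bit of the addressing mode;
// P, U and W then map to bits 10, 9 and 8 of the low halfword, and P == 0
// forces W so that the post-indexed form is selected.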
Instr Assembler::thumb32_2reg_zero_extend_imm8(Register rd,
                                               const MemOperand& x) {
  ASSERT(!x.rm_.is_valid());  // is Immediate.
  int am = x.am_;
  int offset_8 = x.offset_;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));
  int thumbP = (am & P) > 0 ? B10 : 0;
  int thumbU = (am & U) > 0 ? B9 : 0;
  int thumbW = (am & W) > 0 ? B8 : 0;
  if (thumbP == 0) {
    thumbW = B8;
  }
  return (x.rn_.code()*BH0 | rd.code()*B12 | B11 | thumbP | thumbU |
          thumbW | offset_8);
}


// Mode 6
Instr Assembler::thumb32_3reg_zero_extend_imm8(Register rt,
                                               Register rt2,
                                               const MemOperand& x) {
  int am = x.am_;
  int offset_8 = x.offset_;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  // Should we just use 'am' instead of thumb[P|U|W]?
  int thumbP = (am & P) > 0 ? BH8 : 0;
  int thumbU = (am & U) > 0 ? BH7 : 0;
  int thumbW = (am & W) > 0 ? BH5 : 0;
  // P U W Rn Rt Rt2 imm8
  return (thumbP | thumbU | thumbW | x.rn_.code()*BH0 |
          rt.code()*B12 | rt2.code()*B8 | offset_8);
}


Instr Assembler::thumb32_2reg_zero_extend_imm_split(Register rn,
                                                    Register rd,
                                                    const Operand& x) {
  ASSERT(!x.rm_.is_valid());  // is Immediate.
  ASSERT(is_uint12(x.imm32_));
  uint32_t i = (x.imm32_ >> 11) & 1;
  uint32_t imm3 = (x.imm32_ >> 8) & 7;
  uint32_t imm8 = x.imm32_ & 0xff;
  return (i*BH10 | rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm8);
}


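// Illustrative example for the imm4:i:imm3:imm8 split used below: imm ==
// 0xABCD yields imm4 = 0xA, i = 1, imm3 = 3 and imm8 = 0xCD, landing in
// hw1<3:0>, hw1<10>, hw2<14:12> and hw2<7:0> respectively.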
// MOV imm T3 and MOVT T1 set imm4, whereas the other mode 3 instructions
// set Rn.
Instr Assembler::thumb32_1reg_zero_extend_imm_split_4i38(Register rd,
                                                         uint32_t imm) {
  ASSERT(is_uint16(imm));
  uint32_t imm4 = (imm >> 12) & 0xf;
  uint32_t i = (imm >> 11) & 1;
  uint32_t imm3 = (imm >> 8) & 7;
  uint32_t imm8 = imm & 0xff;
  return (i*BH10 | imm4*BH0 | imm3*B12 | rd.code()*B8 | imm8);
}


// Common in mode 1; some instructions use one register and set the other
// to pc, e.g. MOV and MVN have no Rn; TST, TEQ, CMN and CMP have no Rd.
// Otherwise: i S Rn imm3 Rd imm8.
Instr Assembler::thumb32_2reg_thumb_expand_imm(Register rd,
                                               Register rn,
                                               uint32_t i,
                                               uint32_t imm3,
                                               uint32_t imm8) {
  return (i*BH10 | rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm8);
}


// Common in mode 11; some instructions use two registers and set the other
// to pc, e.g. MOV and MVN have no Rn; TST, TEQ, CMN and CMP have no Rd.
Instr Assembler::thumb32_3reg_shift_imm8(Register rn,
                                         Register rd,
                                         const Operand& x) {
  ASSERT(x.rm_.is_valid() && !x.rs_.is_valid());  // is Register & not shift.
  ASSERT(is_uint5(x.shift_imm_));
  uint8_t imm3 = x.shift_imm_ >> 2;
  uint8_t imm2 = x.shift_imm_ & 3;
  return (rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm2*B6 |
          (x.shift_op_ >> 1) | x.rm_.code());
}


Instr Assembler::thumb32_3reg_shift(Register rd,
                                    const Operand& x) {
  ASSERT(x.rm_.is_valid());  // is Register.
  ASSERT(x.rs_.is_valid());  // is shift.
  return (x.rm_.code()*BH0 | B15 | B14 | B13 | B12 |
          rd.code()*B8 | x.rs_.code());
}


// Also used for usat.
Instr Assembler::thumb32_bit_field(Register rn,
                                   Register rd,
                                   int split_imm,
                                   int lower_imm) {
  int imm3 = split_imm >> 2;
  int imm2 = split_imm & 3;
  return (rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm2*B6 | lower_imm);
}


Instr Assembler::thumb32_3reg_lsl(Register rd,
                                  const MemOperand& x) {
  ASSERT(x.rn_.is_valid() && x.rm_.is_valid());  // is Register, both valid.
  if (x.shift_op_ == LSL && is_uint2(x.shift_imm_)) {
    uint8_t imm2 = x.shift_imm_ & 3;
    return (x.rn_.code()*BH0 | rd.code()*B12 | imm2*B4 | x.rm_.code());
  }
  // Shifted-index forms beyond LSL #0-3 are not implemented yet: the shift
  // is dropped and a plain register offset is encoded instead.
  switch (x.shift_op_) {
    case LSL:  // TODO(rkrithiv): call method to encode lsl instruction
    case LSR:  // TODO(rkrithiv): call method to encode lsr instruction
    case ASR:  // TODO(rkrithiv): call method to encode asr instruction
    default: return (x.rn_.code()*BH0 | rd.code()*B12 | x.rm_.code());
  }
}


Instr Assembler::thumb32_4reg(Register dst, Register src1, Register src2,
                              Register srcA) {
  return (src1.code()*BH0 | srcA.code()*B12 | dst.code()*B8 | src2.code());
}


uint16_t Assembler::thumb32_movw_immediate(Instr instr) {
  uint16_t i = (instr >> 26) & 1;
  uint16_t imm4 = (instr >> 16) & 15;
  uint16_t imm3 = (instr >> 12) & 7;
  uint16_t imm8 = instr & 0xff;
  return ((imm4 << 12) | (i << 11) | (imm3 << 8) | imm8);
}


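// Inverse of thumb32_movw_immediate above, minus the Rd field: for any
// 16-bit imm, thumb32_movw_immediate(thumb32_set_movw_immediate(imm))
// should reassemble imm.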
Instr Assembler::thumb32_set_movw_immediate(uint32_t imm) {
  ASSERT(is_uint16(imm));
  uint32_t imm4 = (imm >> 12) & 0xf;
  uint32_t i = (imm >> 11) & 1;
  uint32_t imm3 = (imm >> 8) & 7;
  uint32_t imm8 = imm & 0xff;
  return (i*BH10 | imm4*BH0 | imm3*B12 | imm8);
}


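// 32-bit Thumb2 instructions are stored as two 16-bit halfwords, most
// significant halfword first; the accessors below read and write them in
// that order.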
Instr Assembler::thumb32_instr_at(Address addr) {
  return (Memory::int16_at(addr) << 16) |
         (Memory::int16_at(addr + 2) & 0xffff);
}


void Assembler::thumb32_instr_at_put(int pos, Instr instr) {
  *reinterpret_cast<Instr16*>(buffer_ + pos) = instr >> 16;
  *reinterpret_cast<Instr16*>(buffer_ + pos + kInstr16Size) = instr & 0xFFFF;
}


void Assembler::thumb32_instr_at_put(byte* pc, Instr instr) {
  *reinterpret_cast<Instr16*>(pc) = instr >> 16;
  *reinterpret_cast<Instr16*>(pc + kInstr16Size) = instr & 0xFFFF;
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
