OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
11 #include "vm/runtime_entry.h" | 11 #include "vm/runtime_entry.h" |
12 #include "vm/simulator.h" | 12 #include "vm/simulator.h" |
13 #include "vm/stack_frame.h" | 13 #include "vm/stack_frame.h" |
14 #include "vm/stub_code.h" | 14 #include "vm/stub_code.h" |
15 | 15 |
16 // An extra check since we are assuming the existence of /proc/cpuinfo below. | 16 // An extra check since we are assuming the existence of /proc/cpuinfo below. |
17 #if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \ | 17 #if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \ |
18 !TARGET_OS_IOS | 18 !TARGET_OS_IOS |
19 #error ARM cross-compile only supported on Linux | 19 #error ARM cross-compile only supported on Linux |
20 #endif | 20 #endif |
21 | 21 |
22 namespace dart { | 22 namespace dart { |
23 | 23 |
24 DECLARE_FLAG(bool, check_code_pointer); | 24 DECLARE_FLAG(bool, check_code_pointer); |
25 DECLARE_FLAG(bool, inline_alloc); | 25 DECLARE_FLAG(bool, inline_alloc); |
26 | 26 |
27 uint32_t Address::encoding3() const { | 27 uint32_t Address::encoding3() const { |
28 if (kind_ == Immediate) { | 28 if (kind_ == Immediate) { |
29 uint32_t offset = encoding_ & kOffset12Mask; | 29 uint32_t offset = encoding_ & kOffset12Mask; |
30 ASSERT(offset < 256); | 30 ASSERT(offset < 256); |
31 return (encoding_ & ~kOffset12Mask) | B22 | | 31 return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) | |
32 ((offset & 0xf0) << 4) | (offset & 0xf); | 32 (offset & 0xf); |
33 } | 33 } |
34 ASSERT(kind_ == IndexRegister); | 34 ASSERT(kind_ == IndexRegister); |
35 return encoding_; | 35 return encoding_; |
36 } | 36 } |
37 | 37 |
38 | 38 |
39 uint32_t Address::vencoding() const { | 39 uint32_t Address::vencoding() const { |
40 ASSERT(kind_ == Immediate); | 40 ASSERT(kind_ == Immediate); |
41 uint32_t offset = encoding_ & kOffset12Mask; | 41 uint32_t offset = encoding_ & kOffset12Mask; |
42 ASSERT(offset < (1 << 10)); // In the range 0 to +1020. | 42 ASSERT(offset < (1 << 10)); // In the range 0 to +1020. |
43 ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4. | 43 ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4. |
44 int mode = encoding_ & ((8|4|1) << 21); | 44 int mode = encoding_ & ((8 | 4 | 1) << 21); |
45 ASSERT((mode == Offset) || (mode == NegOffset)); | 45 ASSERT((mode == Offset) || (mode == NegOffset)); |
46 uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); | 46 uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); |
47 if (mode == Offset) { | 47 if (mode == Offset) { |
48 vencoding |= 1 << 23; | 48 vencoding |= 1 << 23; |
49 } | 49 } |
50 return vencoding; | 50 return vencoding; |
51 } | 51 } |
52 | 52 |
53 | 53 |
54 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { | 54 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { |
(...skipping 18 matching lines...) Expand all Loading... |
73 Opcode opcode, | 73 Opcode opcode, |
74 int set_cc, | 74 int set_cc, |
75 Register rn, | 75 Register rn, |
76 Register rd, | 76 Register rd, |
77 Operand o) { | 77 Operand o) { |
78 ASSERT(rd != kNoRegister); | 78 ASSERT(rd != kNoRegister); |
79 ASSERT(cond != kNoCondition); | 79 ASSERT(cond != kNoCondition); |
80 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 80 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
81 type << kTypeShift | | 81 type << kTypeShift | |
82 static_cast<int32_t>(opcode) << kOpcodeShift | | 82 static_cast<int32_t>(opcode) << kOpcodeShift | |
83 set_cc << kSShift | | 83 set_cc << kSShift | static_cast<int32_t>(rn) << kRnShift | |
84 static_cast<int32_t>(rn) << kRnShift | | 84 static_cast<int32_t>(rd) << kRdShift | o.encoding(); |
85 static_cast<int32_t>(rd) << kRdShift | | |
86 o.encoding(); | |
87 Emit(encoding); | 85 Emit(encoding); |
88 } | 86 } |
89 | 87 |
90 | 88 |
91 void Assembler::EmitType5(Condition cond, int32_t offset, bool link) { | 89 void Assembler::EmitType5(Condition cond, int32_t offset, bool link) { |
92 ASSERT(cond != kNoCondition); | 90 ASSERT(cond != kNoCondition); |
93 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 91 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
94 5 << kTypeShift | | 92 5 << kTypeShift | (link ? 1 : 0) << kLinkShift; |
95 (link ? 1 : 0) << kLinkShift; | |
96 Emit(Assembler::EncodeBranchOffset(offset, encoding)); | 93 Emit(Assembler::EncodeBranchOffset(offset, encoding)); |
97 } | 94 } |
98 | 95 |
99 | 96 |
100 void Assembler::EmitMemOp(Condition cond, | 97 void Assembler::EmitMemOp(Condition cond, |
101 bool load, | 98 bool load, |
102 bool byte, | 99 bool byte, |
103 Register rd, | 100 Register rd, |
104 Address ad) { | 101 Address ad) { |
105 ASSERT(rd != kNoRegister); | 102 ASSERT(rd != kNoRegister); |
106 ASSERT(cond != kNoCondition); | 103 ASSERT(cond != kNoCondition); |
107 ASSERT(!ad.has_writeback() || (ad.rn() != rd)); // Unpredictable. | 104 ASSERT(!ad.has_writeback() || (ad.rn() != rd)); // Unpredictable. |
108 | 105 |
109 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 106 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 | |
110 B26 | (ad.kind() == Address::Immediate ? 0 : B25) | | 107 (ad.kind() == Address::Immediate ? 0 : B25) | |
111 (load ? L : 0) | | 108 (load ? L : 0) | (byte ? B : 0) | |
112 (byte ? B : 0) | | 109 (static_cast<int32_t>(rd) << kRdShift) | ad.encoding(); |
113 (static_cast<int32_t>(rd) << kRdShift) | | |
114 ad.encoding(); | |
115 Emit(encoding); | 110 Emit(encoding); |
116 } | 111 } |
117 | 112 |
118 | 113 |
119 void Assembler::EmitMemOpAddressMode3(Condition cond, | 114 void Assembler::EmitMemOpAddressMode3(Condition cond, |
120 int32_t mode, | 115 int32_t mode, |
121 Register rd, | 116 Register rd, |
122 Address ad) { | 117 Address ad) { |
123 ASSERT(rd != kNoRegister); | 118 ASSERT(rd != kNoRegister); |
124 ASSERT(cond != kNoCondition); | 119 ASSERT(cond != kNoCondition); |
125 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 120 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | mode | |
126 mode | | 121 (static_cast<int32_t>(rd) << kRdShift) | ad.encoding3(); |
127 (static_cast<int32_t>(rd) << kRdShift) | | |
128 ad.encoding3(); | |
129 Emit(encoding); | 122 Emit(encoding); |
130 } | 123 } |
131 | 124 |
132 | 125 |
133 void Assembler::EmitMultiMemOp(Condition cond, | 126 void Assembler::EmitMultiMemOp(Condition cond, |
134 BlockAddressMode am, | 127 BlockAddressMode am, |
135 bool load, | 128 bool load, |
136 Register base, | 129 Register base, |
137 RegList regs) { | 130 RegList regs) { |
138 ASSERT(base != kNoRegister); | 131 ASSERT(base != kNoRegister); |
139 ASSERT(cond != kNoCondition); | 132 ASSERT(cond != kNoCondition); |
140 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 133 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
141 B27 | | 134 am | (load ? L : 0) | |
142 am | | 135 (static_cast<int32_t>(base) << kRnShift) | regs; |
143 (load ? L : 0) | | |
144 (static_cast<int32_t>(base) << kRnShift) | | |
145 regs; | |
146 Emit(encoding); | 136 Emit(encoding); |
147 } | 137 } |
148 | 138 |
149 | 139 |
150 void Assembler::EmitShiftImmediate(Condition cond, | 140 void Assembler::EmitShiftImmediate(Condition cond, |
151 Shift opcode, | 141 Shift opcode, |
152 Register rd, | 142 Register rd, |
153 Register rm, | 143 Register rm, |
154 Operand o) { | 144 Operand o) { |
155 ASSERT(cond != kNoCondition); | 145 ASSERT(cond != kNoCondition); |
156 ASSERT(o.type() == 1); | 146 ASSERT(o.type() == 1); |
157 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 147 int32_t encoding = |
158 static_cast<int32_t>(MOV) << kOpcodeShift | | 148 static_cast<int32_t>(cond) << kConditionShift | |
159 static_cast<int32_t>(rd) << kRdShift | | 149 static_cast<int32_t>(MOV) << kOpcodeShift | |
160 o.encoding() << kShiftImmShift | | 150 static_cast<int32_t>(rd) << kRdShift | o.encoding() << kShiftImmShift | |
161 static_cast<int32_t>(opcode) << kShiftShift | | 151 static_cast<int32_t>(opcode) << kShiftShift | static_cast<int32_t>(rm); |
162 static_cast<int32_t>(rm); | |
163 Emit(encoding); | 152 Emit(encoding); |
164 } | 153 } |
165 | 154 |
166 | 155 |
167 void Assembler::EmitShiftRegister(Condition cond, | 156 void Assembler::EmitShiftRegister(Condition cond, |
168 Shift opcode, | 157 Shift opcode, |
169 Register rd, | 158 Register rd, |
170 Register rm, | 159 Register rm, |
171 Operand o) { | 160 Operand o) { |
172 ASSERT(cond != kNoCondition); | 161 ASSERT(cond != kNoCondition); |
173 ASSERT(o.type() == 0); | 162 ASSERT(o.type() == 0); |
174 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 163 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | |
175 static_cast<int32_t>(MOV) << kOpcodeShift | | 164 static_cast<int32_t>(MOV) << kOpcodeShift | |
176 static_cast<int32_t>(rd) << kRdShift | | 165 static_cast<int32_t>(rd) << kRdShift | |
177 o.encoding() << kShiftRegisterShift | | 166 o.encoding() << kShiftRegisterShift | |
178 static_cast<int32_t>(opcode) << kShiftShift | | 167 static_cast<int32_t>(opcode) << kShiftShift | B4 | |
179 B4 | | |
180 static_cast<int32_t>(rm); | 168 static_cast<int32_t>(rm); |
181 Emit(encoding); | 169 Emit(encoding); |
182 } | 170 } |
183 | 171 |
184 | 172 |
185 void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) { | 173 void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) { |
186 EmitType01(cond, o.type(), AND, 0, rn, rd, o); | 174 EmitType01(cond, o.type(), AND, 0, rn, rd, o); |
187 } | 175 } |
188 | 176 |
189 | 177 |
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
304 EmitType01(cond, o.type(), MVN, 1, R0, rd, o); | 292 EmitType01(cond, o.type(), MVN, 1, R0, rd, o); |
305 } | 293 } |
306 | 294 |
307 | 295 |
308 void Assembler::clz(Register rd, Register rm, Condition cond) { | 296 void Assembler::clz(Register rd, Register rm, Condition cond) { |
309 ASSERT(rd != kNoRegister); | 297 ASSERT(rd != kNoRegister); |
310 ASSERT(rm != kNoRegister); | 298 ASSERT(rm != kNoRegister); |
311 ASSERT(cond != kNoCondition); | 299 ASSERT(cond != kNoCondition); |
312 ASSERT(rd != PC); | 300 ASSERT(rd != PC); |
313 ASSERT(rm != PC); | 301 ASSERT(rm != PC); |
314 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 302 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
315 B24 | B22 | B21 | (0xf << 16) | | 303 B22 | B21 | (0xf << 16) | |
316 (static_cast<int32_t>(rd) << kRdShift) | | 304 (static_cast<int32_t>(rd) << kRdShift) | (0xf << 8) | B4 | |
317 (0xf << 8) | B4 | static_cast<int32_t>(rm); | 305 static_cast<int32_t>(rm); |
318 Emit(encoding); | 306 Emit(encoding); |
319 } | 307 } |
320 | 308 |
321 | 309 |
322 void Assembler::movw(Register rd, uint16_t imm16, Condition cond) { | 310 void Assembler::movw(Register rd, uint16_t imm16, Condition cond) { |
323 ASSERT(cond != kNoCondition); | 311 ASSERT(cond != kNoCondition); |
324 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 312 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 | |
325 B25 | B24 | ((imm16 >> 12) << 16) | | 313 ((imm16 >> 12) << 16) | |
326 static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff); | 314 static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff); |
327 Emit(encoding); | 315 Emit(encoding); |
328 } | 316 } |
329 | 317 |
330 | 318 |
331 void Assembler::movt(Register rd, uint16_t imm16, Condition cond) { | 319 void Assembler::movt(Register rd, uint16_t imm16, Condition cond) { |
332 ASSERT(cond != kNoCondition); | 320 ASSERT(cond != kNoCondition); |
333 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | | 321 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 | |
334 B25 | B24 | B22 | ((imm16 >> 12) << 16) | | 322 B22 | ((imm16 >> 12) << 16) | |
335 static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff); | 323 static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff); |
336 Emit(encoding); | 324 Emit(encoding); |
337 } | 325 } |
338 | 326 |
339 | 327 |
340 void Assembler::EmitMulOp(Condition cond, int32_t opcode, | 328 void Assembler::EmitMulOp(Condition cond, |
341 Register rd, Register rn, | 329 int32_t opcode, |
342 Register rm, Register rs) { | 330 Register rd, |
| 331 Register rn, |
| 332 Register rm, |
| 333 Register rs) { |
343 ASSERT(rd != kNoRegister); | 334 ASSERT(rd != kNoRegister); |
344 ASSERT(rn != kNoRegister); | 335 ASSERT(rn != kNoRegister); |
345 ASSERT(rm != kNoRegister); | 336 ASSERT(rm != kNoRegister); |
346 ASSERT(rs != kNoRegister); | 337 ASSERT(rs != kNoRegister); |
347 ASSERT(cond != kNoCondition); | 338 ASSERT(cond != kNoCondition); |
348 int32_t encoding = opcode | | 339 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) | |
349 (static_cast<int32_t>(cond) << kConditionShift) | | 340 (static_cast<int32_t>(rn) << kRnShift) | |
350 (static_cast<int32_t>(rn) << kRnShift) | | 341 (static_cast<int32_t>(rd) << kRdShift) | |
351 (static_cast<int32_t>(rd) << kRdShift) | | 342 (static_cast<int32_t>(rs) << kRsShift) | B7 | B4 | |
352 (static_cast<int32_t>(rs) << kRsShift) | | 343 (static_cast<int32_t>(rm) << kRmShift); |
353 B7 | B4 | | |
354 (static_cast<int32_t>(rm) << kRmShift); | |
355 Emit(encoding); | 344 Emit(encoding); |
356 } | 345 } |
357 | 346 |
358 | 347 |
359 void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) { | 348 void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) { |
360 // Assembler registers rd, rn, rm are encoded as rn, rm, rs. | 349 // Assembler registers rd, rn, rm are encoded as rn, rm, rs. |
361 EmitMulOp(cond, 0, R0, rd, rn, rm); | 350 EmitMulOp(cond, 0, R0, rd, rn, rm); |
362 } | 351 } |
363 | 352 |
364 | 353 |
365 // Like mul, but sets condition flags. | 354 // Like mul, but sets condition flags. |
366 void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) { | 355 void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) { |
367 EmitMulOp(cond, B20, R0, rd, rn, rm); | 356 EmitMulOp(cond, B20, R0, rd, rn, rm); |
368 } | 357 } |
369 | 358 |
370 | 359 |
371 void Assembler::mla(Register rd, Register rn, | 360 void Assembler::mla(Register rd, |
372 Register rm, Register ra, Condition cond) { | 361 Register rn, |
| 362 Register rm, |
| 363 Register ra, |
| 364 Condition cond) { |
373 // rd <- ra + rn * rm. | 365 // rd <- ra + rn * rm. |
374 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. | 366 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. |
375 EmitMulOp(cond, B21, ra, rd, rn, rm); | 367 EmitMulOp(cond, B21, ra, rd, rn, rm); |
376 } | 368 } |
377 | 369 |
378 | 370 |
379 void Assembler::mls(Register rd, Register rn, | 371 void Assembler::mls(Register rd, |
380 Register rm, Register ra, Condition cond) { | 372 Register rn, |
| 373 Register rm, |
| 374 Register ra, |
| 375 Condition cond) { |
381 // rd <- ra - rn * rm. | 376 // rd <- ra - rn * rm. |
382 if (TargetCPUFeatures::arm_version() == ARMv7) { | 377 if (TargetCPUFeatures::arm_version() == ARMv7) { |
383 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. | 378 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd. |
384 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm); | 379 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm); |
385 } else { | 380 } else { |
386 mul(IP, rn, rm, cond); | 381 mul(IP, rn, rm, cond); |
387 sub(rd, ra, Operand(IP), cond); | 382 sub(rd, ra, Operand(IP), cond); |
388 } | 383 } |
389 } | 384 } |
390 | 385 |
391 | 386 |
392 void Assembler::smull(Register rd_lo, Register rd_hi, | 387 void Assembler::smull(Register rd_lo, |
393 Register rn, Register rm, Condition cond) { | 388 Register rd_hi, |
| 389 Register rn, |
| 390 Register rm, |
| 391 Condition cond) { |
394 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. | 392 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
395 EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm); | 393 EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm); |
396 } | 394 } |
397 | 395 |
398 | 396 |
399 void Assembler::umull(Register rd_lo, Register rd_hi, | 397 void Assembler::umull(Register rd_lo, |
400 Register rn, Register rm, Condition cond) { | 398 Register rd_hi, |
| 399 Register rn, |
| 400 Register rm, |
| 401 Condition cond) { |
401 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. | 402 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
402 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm); | 403 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm); |
403 } | 404 } |
404 | 405 |
405 | 406 |
406 void Assembler::umlal(Register rd_lo, Register rd_hi, | 407 void Assembler::umlal(Register rd_lo, |
407 Register rn, Register rm, Condition cond) { | 408 Register rd_hi, |
| 409 Register rn, |
| 410 Register rm, |
| 411 Condition cond) { |
408 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. | 412 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
409 EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm); | 413 EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm); |
410 } | 414 } |
411 | 415 |
412 | 416 |
413 void Assembler::umaal(Register rd_lo, Register rd_hi, | 417 void Assembler::umaal(Register rd_lo, |
414 Register rn, Register rm) { | 418 Register rd_hi, |
| 419 Register rn, |
| 420 Register rm) { |
415 ASSERT(rd_lo != IP); | 421 ASSERT(rd_lo != IP); |
416 ASSERT(rd_hi != IP); | 422 ASSERT(rd_hi != IP); |
417 ASSERT(rn != IP); | 423 ASSERT(rn != IP); |
418 ASSERT(rm != IP); | 424 ASSERT(rm != IP); |
419 if (TargetCPUFeatures::arm_version() != ARMv5TE) { | 425 if (TargetCPUFeatures::arm_version() != ARMv5TE) { |
420 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. | 426 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs. |
421 EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm); | 427 EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm); |
422 } else { | 428 } else { |
423 mov(IP, Operand(0)); | 429 mov(IP, Operand(0)); |
424 umlal(rd_lo, IP, rn, rm); | 430 umlal(rd_lo, IP, rn, rm); |
425 adds(rd_lo, rd_lo, Operand(rd_hi)); | 431 adds(rd_lo, rd_lo, Operand(rd_hi)); |
426 adc(rd_hi, IP, Operand(0)); | 432 adc(rd_hi, IP, Operand(0)); |
427 } | 433 } |
428 } | 434 } |
429 | 435 |
430 | 436 |
431 void Assembler::EmitDivOp(Condition cond, int32_t opcode, | 437 void Assembler::EmitDivOp(Condition cond, |
432 Register rd, Register rn, Register rm) { | 438 int32_t opcode, |
| 439 Register rd, |
| 440 Register rn, |
| 441 Register rm) { |
433 ASSERT(TargetCPUFeatures::integer_division_supported()); | 442 ASSERT(TargetCPUFeatures::integer_division_supported()); |
434 ASSERT(rd != kNoRegister); | 443 ASSERT(rd != kNoRegister); |
435 ASSERT(rn != kNoRegister); | 444 ASSERT(rn != kNoRegister); |
436 ASSERT(rm != kNoRegister); | 445 ASSERT(rm != kNoRegister); |
437 ASSERT(cond != kNoCondition); | 446 ASSERT(cond != kNoCondition); |
438 int32_t encoding = opcode | | 447 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) | |
439 (static_cast<int32_t>(cond) << kConditionShift) | | 448 (static_cast<int32_t>(rn) << kDivRnShift) | |
440 (static_cast<int32_t>(rn) << kDivRnShift) | | 449 (static_cast<int32_t>(rd) << kDivRdShift) | B26 | B25 | |
441 (static_cast<int32_t>(rd) << kDivRdShift) | | 450 B24 | B20 | B4 | (static_cast<int32_t>(rm) << kDivRmShift); |
442 B26 | B25 | B24 | B20 | B4 | | |
443 (static_cast<int32_t>(rm) << kDivRmShift); | |
444 Emit(encoding); | 451 Emit(encoding); |
445 } | 452 } |
446 | 453 |
447 | 454 |
448 void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) { | 455 void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) { |
449 EmitDivOp(cond, 0, rd, rn, rm); | 456 EmitDivOp(cond, 0, rd, rn, rm); |
450 } | 457 } |
451 | 458 |
452 | 459 |
453 void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) { | 460 void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) { |
454 EmitDivOp(cond, B21 , rd, rn, rm); | 461 EmitDivOp(cond, B21, rd, rn, rm); |
455 } | 462 } |
456 | 463 |
457 | 464 |
458 void Assembler::ldr(Register rd, Address ad, Condition cond) { | 465 void Assembler::ldr(Register rd, Address ad, Condition cond) { |
459 EmitMemOp(cond, true, false, rd, ad); | 466 EmitMemOp(cond, true, false, rd, ad); |
460 } | 467 } |
461 | 468 |
462 | 469 |
463 void Assembler::str(Register rd, Address ad, Condition cond) { | 470 void Assembler::str(Register rd, Address ad, Condition cond) { |
464 EmitMemOp(cond, false, false, rd, ad); | 471 EmitMemOp(cond, false, false, rd, ad); |
(...skipping 23 matching lines...) Expand all Loading... |
488 void Assembler::ldrsb(Register rd, Address ad, Condition cond) { | 495 void Assembler::ldrsb(Register rd, Address ad, Condition cond) { |
489 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad); | 496 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad); |
490 } | 497 } |
491 | 498 |
492 | 499 |
493 void Assembler::ldrsh(Register rd, Address ad, Condition cond) { | 500 void Assembler::ldrsh(Register rd, Address ad, Condition cond) { |
494 EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad); | 501 EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad); |
495 } | 502 } |
496 | 503 |
497 | 504 |
498 void Assembler::ldrd(Register rd, Register rd2, Register rn, int32_t offset, | 505 void Assembler::ldrd(Register rd, |
| 506 Register rd2, |
| 507 Register rn, |
| 508 int32_t offset, |
499 Condition cond) { | 509 Condition cond) { |
500 ASSERT((rd % 2) == 0); | 510 ASSERT((rd % 2) == 0); |
501 ASSERT(rd2 == rd + 1); | 511 ASSERT(rd2 == rd + 1); |
502 if (TargetCPUFeatures::arm_version() == ARMv5TE) { | 512 if (TargetCPUFeatures::arm_version() == ARMv5TE) { |
503 ldr(rd, Address(rn, offset), cond); | 513 ldr(rd, Address(rn, offset), cond); |
504 ldr(rd2, Address(rn, offset + kWordSize), cond); | 514 ldr(rd2, Address(rn, offset + kWordSize), cond); |
505 } else { | 515 } else { |
506 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset)); | 516 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset)); |
507 } | 517 } |
508 } | 518 } |
509 | 519 |
510 | 520 |
511 void Assembler::strd(Register rd, Register rd2, Register rn, int32_t offset, | 521 void Assembler::strd(Register rd, |
| 522 Register rd2, |
| 523 Register rn, |
| 524 int32_t offset, |
512 Condition cond) { | 525 Condition cond) { |
513 ASSERT((rd % 2) == 0); | 526 ASSERT((rd % 2) == 0); |
514 ASSERT(rd2 == rd + 1); | 527 ASSERT(rd2 == rd + 1); |
515 if (TargetCPUFeatures::arm_version() == ARMv5TE) { | 528 if (TargetCPUFeatures::arm_version() == ARMv5TE) { |
516 str(rd, Address(rn, offset), cond); | 529 str(rd, Address(rn, offset), cond); |
517 str(rd2, Address(rn, offset + kWordSize), cond); | 530 str(rd2, Address(rn, offset + kWordSize), cond); |
518 } else { | 531 } else { |
519 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset)); | 532 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset)); |
520 } | 533 } |
521 } | 534 } |
522 | 535 |
523 | 536 |
524 void Assembler::ldm(BlockAddressMode am, Register base, RegList regs, | 537 void Assembler::ldm(BlockAddressMode am, |
| 538 Register base, |
| 539 RegList regs, |
525 Condition cond) { | 540 Condition cond) { |
526 ASSERT(regs != 0); | 541 ASSERT(regs != 0); |
527 EmitMultiMemOp(cond, am, true, base, regs); | 542 EmitMultiMemOp(cond, am, true, base, regs); |
528 } | 543 } |
529 | 544 |
530 | 545 |
531 void Assembler::stm(BlockAddressMode am, Register base, RegList regs, | 546 void Assembler::stm(BlockAddressMode am, |
| 547 Register base, |
| 548 RegList regs, |
532 Condition cond) { | 549 Condition cond) { |
533 ASSERT(regs != 0); | 550 ASSERT(regs != 0); |
534 EmitMultiMemOp(cond, am, false, base, regs); | 551 EmitMultiMemOp(cond, am, false, base, regs); |
535 } | 552 } |
536 | 553 |
537 | 554 |
538 void Assembler::ldrex(Register rt, Register rn, Condition cond) { | 555 void Assembler::ldrex(Register rt, Register rn, Condition cond) { |
539 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); | 556 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); |
540 ASSERT(rn != kNoRegister); | 557 ASSERT(rn != kNoRegister); |
541 ASSERT(rt != kNoRegister); | 558 ASSERT(rt != kNoRegister); |
542 ASSERT(cond != kNoCondition); | 559 ASSERT(cond != kNoCondition); |
543 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 560 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
544 B24 | | 561 B23 | L | (static_cast<int32_t>(rn) << kLdExRnShift) | |
545 B23 | | 562 (static_cast<int32_t>(rt) << kLdExRtShift) | B11 | B10 | |
546 L | | 563 B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0; |
547 (static_cast<int32_t>(rn) << kLdExRnShift) | | |
548 (static_cast<int32_t>(rt) << kLdExRtShift) | | |
549 B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0; | |
550 Emit(encoding); | 564 Emit(encoding); |
551 } | 565 } |
552 | 566 |
553 | 567 |
554 void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) { | 568 void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) { |
555 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); | 569 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); |
556 ASSERT(rn != kNoRegister); | 570 ASSERT(rn != kNoRegister); |
557 ASSERT(rd != kNoRegister); | 571 ASSERT(rd != kNoRegister); |
558 ASSERT(rt != kNoRegister); | 572 ASSERT(rt != kNoRegister); |
559 ASSERT(cond != kNoCondition); | 573 ASSERT(cond != kNoCondition); |
560 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 574 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
561 B24 | | 575 B23 | (static_cast<int32_t>(rn) << kStrExRnShift) | |
562 B23 | | 576 (static_cast<int32_t>(rd) << kStrExRdShift) | B11 | B10 | |
563 (static_cast<int32_t>(rn) << kStrExRnShift) | | 577 B9 | B8 | B7 | B4 | |
564 (static_cast<int32_t>(rd) << kStrExRdShift) | | |
565 B11 | B10 | B9 | B8 | B7 | B4 | | |
566 (static_cast<int32_t>(rt) << kStrExRtShift); | 578 (static_cast<int32_t>(rt) << kStrExRtShift); |
567 Emit(encoding); | 579 Emit(encoding); |
568 } | 580 } |
569 | 581 |
570 | 582 |
571 void Assembler::clrex() { | 583 void Assembler::clrex() { |
572 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); | 584 ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE); |
573 int32_t encoding = (kSpecialCondition << kConditionShift) | | 585 int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 | |
574 B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf; | 586 B21 | B20 | (0xff << 12) | B4 | 0xf; |
575 Emit(encoding); | 587 Emit(encoding); |
576 } | 588 } |
577 | 589 |
578 | 590 |
579 void Assembler::nop(Condition cond) { | 591 void Assembler::nop(Condition cond) { |
580 ASSERT(cond != kNoCondition); | 592 ASSERT(cond != kNoCondition); |
581 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 593 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B25 | |
582 B25 | B24 | B21 | (0xf << 12); | 594 B24 | B21 | (0xf << 12); |
583 Emit(encoding); | 595 Emit(encoding); |
584 } | 596 } |
585 | 597 |
586 | 598 |
587 void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) { | 599 void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) { |
588 ASSERT(TargetCPUFeatures::vfp_supported()); | 600 ASSERT(TargetCPUFeatures::vfp_supported()); |
589 ASSERT(sn != kNoSRegister); | 601 ASSERT(sn != kNoSRegister); |
590 ASSERT(rt != kNoRegister); | 602 ASSERT(rt != kNoRegister); |
591 ASSERT(rt != SP); | 603 ASSERT(rt != SP); |
592 ASSERT(rt != PC); | 604 ASSERT(rt != PC); |
593 ASSERT(cond != kNoCondition); | 605 ASSERT(cond != kNoCondition); |
594 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 606 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
595 B27 | B26 | B25 | | 607 B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) | |
596 ((static_cast<int32_t>(sn) >> 1)*B16) | | 608 (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
597 (static_cast<int32_t>(rt)*B12) | B11 | B9 | | 609 ((static_cast<int32_t>(sn) & 1) * B7) | B4; |
598 ((static_cast<int32_t>(sn) & 1)*B7) | B4; | |
599 Emit(encoding); | 610 Emit(encoding); |
600 } | 611 } |
601 | 612 |
602 | 613 |
603 void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) { | 614 void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) { |
604 ASSERT(TargetCPUFeatures::vfp_supported()); | 615 ASSERT(TargetCPUFeatures::vfp_supported()); |
605 ASSERT(sn != kNoSRegister); | 616 ASSERT(sn != kNoSRegister); |
606 ASSERT(rt != kNoRegister); | 617 ASSERT(rt != kNoRegister); |
607 ASSERT(rt != SP); | 618 ASSERT(rt != SP); |
608 ASSERT(rt != PC); | 619 ASSERT(rt != PC); |
609 ASSERT(cond != kNoCondition); | 620 ASSERT(cond != kNoCondition); |
610 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 621 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
611 B27 | B26 | B25 | B20 | | 622 B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) | |
612 ((static_cast<int32_t>(sn) >> 1)*B16) | | 623 (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
613 (static_cast<int32_t>(rt)*B12) | B11 | B9 | | 624 ((static_cast<int32_t>(sn) & 1) * B7) | B4; |
614 ((static_cast<int32_t>(sn) & 1)*B7) | B4; | |
615 Emit(encoding); | 625 Emit(encoding); |
616 } | 626 } |
617 | 627 |
618 | 628 |
619 void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2, | 629 void Assembler::vmovsrr(SRegister sm, |
| 630 Register rt, |
| 631 Register rt2, |
620 Condition cond) { | 632 Condition cond) { |
621 ASSERT(TargetCPUFeatures::vfp_supported()); | 633 ASSERT(TargetCPUFeatures::vfp_supported()); |
622 ASSERT(sm != kNoSRegister); | 634 ASSERT(sm != kNoSRegister); |
623 ASSERT(sm != S31); | 635 ASSERT(sm != S31); |
624 ASSERT(rt != kNoRegister); | 636 ASSERT(rt != kNoRegister); |
625 ASSERT(rt != SP); | 637 ASSERT(rt != SP); |
626 ASSERT(rt != PC); | 638 ASSERT(rt != PC); |
627 ASSERT(rt2 != kNoRegister); | 639 ASSERT(rt2 != kNoRegister); |
628 ASSERT(rt2 != SP); | 640 ASSERT(rt2 != SP); |
629 ASSERT(rt2 != PC); | 641 ASSERT(rt2 != PC); |
630 ASSERT(cond != kNoCondition); | 642 ASSERT(cond != kNoCondition); |
631 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 643 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
632 B27 | B26 | B22 | | 644 B26 | B22 | (static_cast<int32_t>(rt2) * B16) | |
633 (static_cast<int32_t>(rt2)*B16) | | 645 (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
634 (static_cast<int32_t>(rt)*B12) | B11 | B9 | | 646 ((static_cast<int32_t>(sm) & 1) * B5) | B4 | |
635 ((static_cast<int32_t>(sm) & 1)*B5) | B4 | | |
636 (static_cast<int32_t>(sm) >> 1); | 647 (static_cast<int32_t>(sm) >> 1); |
637 Emit(encoding); | 648 Emit(encoding); |
638 } | 649 } |
639 | 650 |
640 | 651 |
641 void Assembler::vmovrrs(Register rt, Register rt2, SRegister sm, | 652 void Assembler::vmovrrs(Register rt, |
| 653 Register rt2, |
| 654 SRegister sm, |
642 Condition cond) { | 655 Condition cond) { |
643 ASSERT(TargetCPUFeatures::vfp_supported()); | 656 ASSERT(TargetCPUFeatures::vfp_supported()); |
644 ASSERT(sm != kNoSRegister); | 657 ASSERT(sm != kNoSRegister); |
645 ASSERT(sm != S31); | 658 ASSERT(sm != S31); |
646 ASSERT(rt != kNoRegister); | 659 ASSERT(rt != kNoRegister); |
647 ASSERT(rt != SP); | 660 ASSERT(rt != SP); |
648 ASSERT(rt != PC); | 661 ASSERT(rt != PC); |
649 ASSERT(rt2 != kNoRegister); | 662 ASSERT(rt2 != kNoRegister); |
650 ASSERT(rt2 != SP); | 663 ASSERT(rt2 != SP); |
651 ASSERT(rt2 != PC); | 664 ASSERT(rt2 != PC); |
652 ASSERT(rt != rt2); | 665 ASSERT(rt != rt2); |
653 ASSERT(cond != kNoCondition); | 666 ASSERT(cond != kNoCondition); |
654 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 667 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
655 B27 | B26 | B22 | B20 | | 668 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) | |
656 (static_cast<int32_t>(rt2)*B16) | | 669 (static_cast<int32_t>(rt) * B12) | B11 | B9 | |
657 (static_cast<int32_t>(rt)*B12) | B11 | B9 | | 670 ((static_cast<int32_t>(sm) & 1) * B5) | B4 | |
658 ((static_cast<int32_t>(sm) & 1)*B5) | B4 | | |
659 (static_cast<int32_t>(sm) >> 1); | 671 (static_cast<int32_t>(sm) >> 1); |
660 Emit(encoding); | 672 Emit(encoding); |
661 } | 673 } |
662 | 674 |
663 | 675 |
664 void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) { | 676 void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) { |
665 ASSERT(TargetCPUFeatures::vfp_supported()); | 677 ASSERT(TargetCPUFeatures::vfp_supported()); |
666 ASSERT((i == 0) || (i == 1)); | 678 ASSERT((i == 0) || (i == 1)); |
667 ASSERT(rt != kNoRegister); | 679 ASSERT(rt != kNoRegister); |
668 ASSERT(rt != SP); | 680 ASSERT(rt != SP); |
669 ASSERT(rt != PC); | 681 ASSERT(rt != PC); |
670 ASSERT(dn != kNoDRegister); | 682 ASSERT(dn != kNoDRegister); |
671 ASSERT(cond != kNoCondition); | 683 ASSERT(cond != kNoCondition); |
672 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 684 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
673 B27 | B26 | B25 | | 685 B26 | B25 | (i * B21) | (static_cast<int32_t>(rt) * B12) | |
674 (i*B21) | | 686 B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) | |
675 (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 | | 687 ((static_cast<int32_t>(dn) & 0xf) * B16) | B4; |
676 ((static_cast<int32_t>(dn) >> 4)*B7) | | |
677 ((static_cast<int32_t>(dn) & 0xf)*B16) | B4; | |
678 Emit(encoding); | 688 Emit(encoding); |
679 } | 689 } |
680 | 690 |
681 | 691 |
682 void Assembler::vmovdrr(DRegister dm, Register rt, Register rt2, | 692 void Assembler::vmovdrr(DRegister dm, |
| 693 Register rt, |
| 694 Register rt2, |
683 Condition cond) { | 695 Condition cond) { |
684 ASSERT(TargetCPUFeatures::vfp_supported()); | 696 ASSERT(TargetCPUFeatures::vfp_supported()); |
685 ASSERT(dm != kNoDRegister); | 697 ASSERT(dm != kNoDRegister); |
686 ASSERT(rt != kNoRegister); | 698 ASSERT(rt != kNoRegister); |
687 ASSERT(rt != SP); | 699 ASSERT(rt != SP); |
688 ASSERT(rt != PC); | 700 ASSERT(rt != PC); |
689 ASSERT(rt2 != kNoRegister); | 701 ASSERT(rt2 != kNoRegister); |
690 ASSERT(rt2 != SP); | 702 ASSERT(rt2 != SP); |
691 ASSERT(rt2 != PC); | 703 ASSERT(rt2 != PC); |
692 ASSERT(cond != kNoCondition); | 704 ASSERT(cond != kNoCondition); |
693 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 705 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
694 B27 | B26 | B22 | | 706 B26 | B22 | (static_cast<int32_t>(rt2) * B16) | |
695 (static_cast<int32_t>(rt2)*B16) | | 707 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 | |
696 (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 | | 708 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 | |
697 ((static_cast<int32_t>(dm) >> 4)*B5) | B4 | | |
698 (static_cast<int32_t>(dm) & 0xf); | 709 (static_cast<int32_t>(dm) & 0xf); |
699 Emit(encoding); | 710 Emit(encoding); |
700 } | 711 } |
701 | 712 |
702 | 713 |
703 void Assembler::vmovrrd(Register rt, Register rt2, DRegister dm, | 714 void Assembler::vmovrrd(Register rt, |
| 715 Register rt2, |
| 716 DRegister dm, |
704 Condition cond) { | 717 Condition cond) { |
705 ASSERT(TargetCPUFeatures::vfp_supported()); | 718 ASSERT(TargetCPUFeatures::vfp_supported()); |
706 ASSERT(dm != kNoDRegister); | 719 ASSERT(dm != kNoDRegister); |
707 ASSERT(rt != kNoRegister); | 720 ASSERT(rt != kNoRegister); |
708 ASSERT(rt != SP); | 721 ASSERT(rt != SP); |
709 ASSERT(rt != PC); | 722 ASSERT(rt != PC); |
710 ASSERT(rt2 != kNoRegister); | 723 ASSERT(rt2 != kNoRegister); |
711 ASSERT(rt2 != SP); | 724 ASSERT(rt2 != SP); |
712 ASSERT(rt2 != PC); | 725 ASSERT(rt2 != PC); |
713 ASSERT(rt != rt2); | 726 ASSERT(rt != rt2); |
714 ASSERT(cond != kNoCondition); | 727 ASSERT(cond != kNoCondition); |
715 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 728 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
716 B27 | B26 | B22 | B20 | | 729 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) | |
717 (static_cast<int32_t>(rt2)*B16) | | 730 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 | |
718 (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 | | 731 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 | |
719 ((static_cast<int32_t>(dm) >> 4)*B5) | B4 | | |
720 (static_cast<int32_t>(dm) & 0xf); | 732 (static_cast<int32_t>(dm) & 0xf); |
721 Emit(encoding); | 733 Emit(encoding); |
722 } | 734 } |
723 | 735 |
724 | 736 |
725 void Assembler::vldrs(SRegister sd, Address ad, Condition cond) { | 737 void Assembler::vldrs(SRegister sd, Address ad, Condition cond) { |
726 ASSERT(TargetCPUFeatures::vfp_supported()); | 738 ASSERT(TargetCPUFeatures::vfp_supported()); |
727 ASSERT(sd != kNoSRegister); | 739 ASSERT(sd != kNoSRegister); |
728 ASSERT(cond != kNoCondition); | 740 ASSERT(cond != kNoCondition); |
729 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 741 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
730 B27 | B26 | B24 | B20 | | 742 B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) | |
731 ((static_cast<int32_t>(sd) & 1)*B22) | | 743 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 | |
732 ((static_cast<int32_t>(sd) >> 1)*B12) | | 744 ad.vencoding(); |
733 B11 | B9 | ad.vencoding(); | |
734 Emit(encoding); | 745 Emit(encoding); |
735 } | 746 } |
736 | 747 |
737 | 748 |
738 void Assembler::vstrs(SRegister sd, Address ad, Condition cond) { | 749 void Assembler::vstrs(SRegister sd, Address ad, Condition cond) { |
739 ASSERT(TargetCPUFeatures::vfp_supported()); | 750 ASSERT(TargetCPUFeatures::vfp_supported()); |
740 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); | 751 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); |
741 ASSERT(sd != kNoSRegister); | 752 ASSERT(sd != kNoSRegister); |
742 ASSERT(cond != kNoCondition); | 753 ASSERT(cond != kNoCondition); |
743 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 754 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
744 B27 | B26 | B24 | | 755 B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) | |
745 ((static_cast<int32_t>(sd) & 1)*B22) | | 756 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 | |
746 ((static_cast<int32_t>(sd) >> 1)*B12) | | 757 ad.vencoding(); |
747 B11 | B9 | ad.vencoding(); | |
748 Emit(encoding); | 758 Emit(encoding); |
749 } | 759 } |
750 | 760 |
751 | 761 |
752 void Assembler::vldrd(DRegister dd, Address ad, Condition cond) { | 762 void Assembler::vldrd(DRegister dd, Address ad, Condition cond) { |
753 ASSERT(TargetCPUFeatures::vfp_supported()); | 763 ASSERT(TargetCPUFeatures::vfp_supported()); |
754 ASSERT(dd != kNoDRegister); | 764 ASSERT(dd != kNoDRegister); |
755 ASSERT(cond != kNoCondition); | 765 ASSERT(cond != kNoCondition); |
756 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 766 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
757 B27 | B26 | B24 | B20 | | 767 B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) | |
758 ((static_cast<int32_t>(dd) >> 4)*B22) | | 768 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 | |
759 ((static_cast<int32_t>(dd) & 0xf)*B12) | | 769 ad.vencoding(); |
760 B11 | B9 | B8 | ad.vencoding(); | |
761 Emit(encoding); | 770 Emit(encoding); |
762 } | 771 } |
763 | 772 |
764 | 773 |
765 void Assembler::vstrd(DRegister dd, Address ad, Condition cond) { | 774 void Assembler::vstrd(DRegister dd, Address ad, Condition cond) { |
766 ASSERT(TargetCPUFeatures::vfp_supported()); | 775 ASSERT(TargetCPUFeatures::vfp_supported()); |
767 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); | 776 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC); |
768 ASSERT(dd != kNoDRegister); | 777 ASSERT(dd != kNoDRegister); |
769 ASSERT(cond != kNoCondition); | 778 ASSERT(cond != kNoCondition); |
770 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 779 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
771 B27 | B26 | B24 | | 780 B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) | |
772 ((static_cast<int32_t>(dd) >> 4)*B22) | | 781 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 | |
773 ((static_cast<int32_t>(dd) & 0xf)*B12) | | 782 ad.vencoding(); |
774 B11 | B9 | B8 | ad.vencoding(); | |
775 Emit(encoding); | 783 Emit(encoding); |
776 } | 784 } |
777 | 785 |
778 void Assembler::EmitMultiVSMemOp(Condition cond, | 786 void Assembler::EmitMultiVSMemOp(Condition cond, |
779 BlockAddressMode am, | 787 BlockAddressMode am, |
780 bool load, | 788 bool load, |
781 Register base, | 789 Register base, |
782 SRegister start, | 790 SRegister start, |
783 uint32_t count) { | 791 uint32_t count) { |
784 ASSERT(TargetCPUFeatures::vfp_supported()); | 792 ASSERT(TargetCPUFeatures::vfp_supported()); |
785 ASSERT(base != kNoRegister); | 793 ASSERT(base != kNoRegister); |
786 ASSERT(cond != kNoCondition); | 794 ASSERT(cond != kNoCondition); |
787 ASSERT(start != kNoSRegister); | 795 ASSERT(start != kNoSRegister); |
788 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters); | 796 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters); |
789 | 797 |
790 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 798 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
791 B27 | B26 | B11 | B9 | | 799 B26 | B11 | B9 | am | (load ? L : 0) | |
792 am | | |
793 (load ? L : 0) | | |
794 (static_cast<int32_t>(base) << kRnShift) | | 800 (static_cast<int32_t>(base) << kRnShift) | |
795 ((static_cast<int32_t>(start) & 0x1) ? D : 0) | | 801 ((static_cast<int32_t>(start) & 0x1) ? D : 0) | |
796 ((static_cast<int32_t>(start) >> 1) << 12) | | 802 ((static_cast<int32_t>(start) >> 1) << 12) | count; |
797 count; | |
798 Emit(encoding); | 803 Emit(encoding); |
799 } | 804 } |
800 | 805 |
801 | 806 |
802 void Assembler::EmitMultiVDMemOp(Condition cond, | 807 void Assembler::EmitMultiVDMemOp(Condition cond, |
803 BlockAddressMode am, | 808 BlockAddressMode am, |
804 bool load, | 809 bool load, |
805 Register base, | 810 Register base, |
806 DRegister start, | 811 DRegister start, |
807 int32_t count) { | 812 int32_t count) { |
808 ASSERT(TargetCPUFeatures::vfp_supported()); | 813 ASSERT(TargetCPUFeatures::vfp_supported()); |
809 ASSERT(base != kNoRegister); | 814 ASSERT(base != kNoRegister); |
810 ASSERT(cond != kNoCondition); | 815 ASSERT(cond != kNoCondition); |
811 ASSERT(start != kNoDRegister); | 816 ASSERT(start != kNoDRegister); |
812 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters); | 817 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters); |
813 const int armv5te = TargetCPUFeatures::arm_version() == ARMv5TE ? 1 : 0; | 818 const int armv5te = TargetCPUFeatures::arm_version() == ARMv5TE ? 1 : 0; |
814 | 819 |
815 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 820 int32_t encoding = |
816 B27 | B26 | B11 | B9 | B8 | | 821 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 | |
817 am | | 822 B8 | am | (load ? L : 0) | (static_cast<int32_t>(base) << kRnShift) | |
818 (load ? L : 0) | | 823 ((static_cast<int32_t>(start) & 0x10) ? D : 0) | |
819 (static_cast<int32_t>(base) << kRnShift) | | 824 ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | armv5te; |
820 ((static_cast<int32_t>(start) & 0x10) ? D : 0) | | |
821 ((static_cast<int32_t>(start) & 0xf) << 12) | | |
822 (count << 1) | armv5te; | |
823 Emit(encoding); | 825 Emit(encoding); |
824 } | 826 } |
825 | 827 |
826 | 828 |
827 void Assembler::vldms(BlockAddressMode am, Register base, | 829 void Assembler::vldms(BlockAddressMode am, |
828 SRegister first, SRegister last, Condition cond) { | 830 Register base, |
| 831 SRegister first, |
| 832 SRegister last, |
| 833 Condition cond) { |
829 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 834 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
830 ASSERT(last > first); | 835 ASSERT(last > first); |
831 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1); | 836 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1); |
832 } | 837 } |
833 | 838 |
834 | 839 |
835 void Assembler::vstms(BlockAddressMode am, Register base, | 840 void Assembler::vstms(BlockAddressMode am, |
836 SRegister first, SRegister last, Condition cond) { | 841 Register base, |
| 842 SRegister first, |
| 843 SRegister last, |
| 844 Condition cond) { |
837 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 845 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
838 ASSERT(last > first); | 846 ASSERT(last > first); |
839 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1); | 847 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1); |
840 } | 848 } |
841 | 849 |
842 | 850 |
843 void Assembler::vldmd(BlockAddressMode am, Register base, | 851 void Assembler::vldmd(BlockAddressMode am, |
844 DRegister first, intptr_t count, Condition cond) { | 852 Register base, |
| 853 DRegister first, |
| 854 intptr_t count, |
| 855 Condition cond) { |
845 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 856 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
846 ASSERT(count <= 16); | 857 ASSERT(count <= 16); |
847 ASSERT(first + count <= kNumberOfDRegisters); | 858 ASSERT(first + count <= kNumberOfDRegisters); |
848 EmitMultiVDMemOp(cond, am, true, base, first, count); | 859 EmitMultiVDMemOp(cond, am, true, base, first, count); |
849 } | 860 } |
850 | 861 |
851 | 862 |
852 void Assembler::vstmd(BlockAddressMode am, Register base, | 863 void Assembler::vstmd(BlockAddressMode am, |
853 DRegister first, intptr_t count, Condition cond) { | 864 Register base, |
| 865 DRegister first, |
| 866 intptr_t count, |
| 867 Condition cond) { |
854 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 868 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
855 ASSERT(count <= 16); | 869 ASSERT(count <= 16); |
856 ASSERT(first + count <= kNumberOfDRegisters); | 870 ASSERT(first + count <= kNumberOfDRegisters); |
857 EmitMultiVDMemOp(cond, am, false, base, first, count); | 871 EmitMultiVDMemOp(cond, am, false, base, first, count); |
858 } | 872 } |
859 | 873 |
860 | 874 |
861 void Assembler::EmitVFPsss(Condition cond, int32_t opcode, | 875 void Assembler::EmitVFPsss(Condition cond, |
862 SRegister sd, SRegister sn, SRegister sm) { | 876 int32_t opcode, |
| 877 SRegister sd, |
| 878 SRegister sn, |
| 879 SRegister sm) { |
863 ASSERT(TargetCPUFeatures::vfp_supported()); | 880 ASSERT(TargetCPUFeatures::vfp_supported()); |
864 ASSERT(sd != kNoSRegister); | 881 ASSERT(sd != kNoSRegister); |
865 ASSERT(sn != kNoSRegister); | 882 ASSERT(sn != kNoSRegister); |
866 ASSERT(sm != kNoSRegister); | 883 ASSERT(sm != kNoSRegister); |
867 ASSERT(cond != kNoCondition); | 884 ASSERT(cond != kNoCondition); |
868 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 885 int32_t encoding = |
869 B27 | B26 | B25 | B11 | B9 | opcode | | 886 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
870 ((static_cast<int32_t>(sd) & 1)*B22) | | 887 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) | |
871 ((static_cast<int32_t>(sn) >> 1)*B16) | | 888 ((static_cast<int32_t>(sn) >> 1) * B16) | |
872 ((static_cast<int32_t>(sd) >> 1)*B12) | | 889 ((static_cast<int32_t>(sd) >> 1) * B12) | |
873 ((static_cast<int32_t>(sn) & 1)*B7) | | 890 ((static_cast<int32_t>(sn) & 1) * B7) | |
874 ((static_cast<int32_t>(sm) & 1)*B5) | | 891 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1); |
875 (static_cast<int32_t>(sm) >> 1); | |
876 Emit(encoding); | 892 Emit(encoding); |
877 } | 893 } |
878 | 894 |
879 | 895 |
880 void Assembler::EmitVFPddd(Condition cond, int32_t opcode, | 896 void Assembler::EmitVFPddd(Condition cond, |
881 DRegister dd, DRegister dn, DRegister dm) { | 897 int32_t opcode, |
| 898 DRegister dd, |
| 899 DRegister dn, |
| 900 DRegister dm) { |
882 ASSERT(TargetCPUFeatures::vfp_supported()); | 901 ASSERT(TargetCPUFeatures::vfp_supported()); |
883 ASSERT(dd != kNoDRegister); | 902 ASSERT(dd != kNoDRegister); |
884 ASSERT(dn != kNoDRegister); | 903 ASSERT(dn != kNoDRegister); |
885 ASSERT(dm != kNoDRegister); | 904 ASSERT(dm != kNoDRegister); |
886 ASSERT(cond != kNoCondition); | 905 ASSERT(cond != kNoCondition); |
887 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 906 int32_t encoding = |
888 B27 | B26 | B25 | B11 | B9 | B8 | opcode | | 907 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
889 ((static_cast<int32_t>(dd) >> 4)*B22) | | 908 B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) | |
890 ((static_cast<int32_t>(dn) & 0xf)*B16) | | 909 ((static_cast<int32_t>(dn) & 0xf) * B16) | |
891 ((static_cast<int32_t>(dd) & 0xf)*B12) | | 910 ((static_cast<int32_t>(dd) & 0xf) * B12) | |
892 ((static_cast<int32_t>(dn) >> 4)*B7) | | 911 ((static_cast<int32_t>(dn) >> 4) * B7) | |
893 ((static_cast<int32_t>(dm) >> 4)*B5) | | 912 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
894 (static_cast<int32_t>(dm) & 0xf); | |
895 Emit(encoding); | 913 Emit(encoding); |
896 } | 914 } |
897 | 915 |
898 | 916 |
899 void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) { | 917 void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) { |
900 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); | 918 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); |
901 } | 919 } |
902 | 920 |
903 | 921 |
904 void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) { | 922 void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) { |
905 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); | 923 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); |
906 } | 924 } |
907 | 925 |
908 | 926 |
909 bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) { | 927 bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) { |
910 if (TargetCPUFeatures::arm_version() != ARMv7) { | 928 if (TargetCPUFeatures::arm_version() != ARMv7) { |
911 return false; | 929 return false; |
912 } | 930 } |
913 uint32_t imm32 = bit_cast<uint32_t, float>(s_imm); | 931 uint32_t imm32 = bit_cast<uint32_t, float>(s_imm); |
914 if (((imm32 & ((1 << 19) - 1)) == 0) && | 932 if (((imm32 & ((1 << 19) - 1)) == 0) && |
915 ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) || | 933 ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) || |
916 (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) { | 934 (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) { |
917 uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) | | 935 uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) | |
918 ((imm32 >> 19) & ((1 << 6) -1)); | 936 ((imm32 >> 19) & ((1 << 6) - 1)); |
919 EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf), | 937 EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd, |
920 sd, S0, S0); | 938 S0, S0); |
921 return true; | 939 return true; |
922 } | 940 } |
923 return false; | 941 return false; |
924 } | 942 } |
925 | 943 |
926 | 944 |
927 bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) { | 945 bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) { |
928 if (TargetCPUFeatures::arm_version() != ARMv7) { | 946 if (TargetCPUFeatures::arm_version() != ARMv7) { |
929 return false; | 947 return false; |
930 } | 948 } |
931 uint64_t imm64 = bit_cast<uint64_t, double>(d_imm); | 949 uint64_t imm64 = bit_cast<uint64_t, double>(d_imm); |
932 if (((imm64 & ((1LL << 48) - 1)) == 0) && | 950 if (((imm64 & ((1LL << 48) - 1)) == 0) && |
933 ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) || | 951 ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) || |
934 (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) { | 952 (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) { |
935 uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) | | 953 uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) | |
936 ((imm64 >> 48) & ((1 << 6) -1)); | 954 ((imm64 >> 48) & ((1 << 6) - 1)); |
937 EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf), | 955 EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf), |
938 dd, D0, D0); | 956 dd, D0, D0); |
939 return true; | 957 return true; |
940 } | 958 } |
941 return false; | 959 return false; |
942 } | 960 } |
943 | 961 |
944 | 962 |
945 void Assembler::vadds(SRegister sd, SRegister sn, SRegister sm, | 963 void Assembler::vadds(SRegister sd, |
| 964 SRegister sn, |
| 965 SRegister sm, |
946 Condition cond) { | 966 Condition cond) { |
947 EmitVFPsss(cond, B21 | B20, sd, sn, sm); | 967 EmitVFPsss(cond, B21 | B20, sd, sn, sm); |
948 } | 968 } |
949 | 969 |
950 | 970 |
951 void Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm, | 971 void Assembler::vaddd(DRegister dd, |
| 972 DRegister dn, |
| 973 DRegister dm, |
952 Condition cond) { | 974 Condition cond) { |
953 EmitVFPddd(cond, B21 | B20, dd, dn, dm); | 975 EmitVFPddd(cond, B21 | B20, dd, dn, dm); |
954 } | 976 } |
955 | 977 |
956 | 978 |
957 void Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm, | 979 void Assembler::vsubs(SRegister sd, |
| 980 SRegister sn, |
| 981 SRegister sm, |
958 Condition cond) { | 982 Condition cond) { |
959 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); | 983 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); |
960 } | 984 } |
961 | 985 |
962 | 986 |
963 void Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm, | 987 void Assembler::vsubd(DRegister dd, |
| 988 DRegister dn, |
| 989 DRegister dm, |
964 Condition cond) { | 990 Condition cond) { |
965 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); | 991 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); |
966 } | 992 } |
967 | 993 |
968 | 994 |
969 void Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm, | 995 void Assembler::vmuls(SRegister sd, |
| 996 SRegister sn, |
| 997 SRegister sm, |
970 Condition cond) { | 998 Condition cond) { |
971 EmitVFPsss(cond, B21, sd, sn, sm); | 999 EmitVFPsss(cond, B21, sd, sn, sm); |
972 } | 1000 } |
973 | 1001 |
974 | 1002 |
975 void Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm, | 1003 void Assembler::vmuld(DRegister dd, |
| 1004 DRegister dn, |
| 1005 DRegister dm, |
976 Condition cond) { | 1006 Condition cond) { |
977 EmitVFPddd(cond, B21, dd, dn, dm); | 1007 EmitVFPddd(cond, B21, dd, dn, dm); |
978 } | 1008 } |
979 | 1009 |
980 | 1010 |
981 void Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm, | 1011 void Assembler::vmlas(SRegister sd, |
| 1012 SRegister sn, |
| 1013 SRegister sm, |
982 Condition cond) { | 1014 Condition cond) { |
983 EmitVFPsss(cond, 0, sd, sn, sm); | 1015 EmitVFPsss(cond, 0, sd, sn, sm); |
984 } | 1016 } |
985 | 1017 |
986 | 1018 |
987 void Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm, | 1019 void Assembler::vmlad(DRegister dd, |
| 1020 DRegister dn, |
| 1021 DRegister dm, |
988 Condition cond) { | 1022 Condition cond) { |
989 EmitVFPddd(cond, 0, dd, dn, dm); | 1023 EmitVFPddd(cond, 0, dd, dn, dm); |
990 } | 1024 } |
991 | 1025 |
992 | 1026 |
993 void Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm, | 1027 void Assembler::vmlss(SRegister sd, |
| 1028 SRegister sn, |
| 1029 SRegister sm, |
994 Condition cond) { | 1030 Condition cond) { |
995 EmitVFPsss(cond, B6, sd, sn, sm); | 1031 EmitVFPsss(cond, B6, sd, sn, sm); |
996 } | 1032 } |
997 | 1033 |
998 | 1034 |
999 void Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm, | 1035 void Assembler::vmlsd(DRegister dd, |
| 1036 DRegister dn, |
| 1037 DRegister dm, |
1000 Condition cond) { | 1038 Condition cond) { |
1001 EmitVFPddd(cond, B6, dd, dn, dm); | 1039 EmitVFPddd(cond, B6, dd, dn, dm); |
1002 } | 1040 } |
1003 | 1041 |
1004 | 1042 |
1005 void Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm, | 1043 void Assembler::vdivs(SRegister sd, |
| 1044 SRegister sn, |
| 1045 SRegister sm, |
1006 Condition cond) { | 1046 Condition cond) { |
1007 EmitVFPsss(cond, B23, sd, sn, sm); | 1047 EmitVFPsss(cond, B23, sd, sn, sm); |
1008 } | 1048 } |
1009 | 1049 |
1010 | 1050 |
1011 void Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm, | 1051 void Assembler::vdivd(DRegister dd, |
| 1052 DRegister dn, |
| 1053 DRegister dm, |
1012 Condition cond) { | 1054 Condition cond) { |
1013 EmitVFPddd(cond, B23, dd, dn, dm); | 1055 EmitVFPddd(cond, B23, dd, dn, dm); |
1014 } | 1056 } |
1015 | 1057 |
1016 | 1058 |
1017 void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) { | 1059 void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) { |
1018 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm); | 1060 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm); |
1019 } | 1061 } |
1020 | 1062 |
1021 | 1063 |
(...skipping 14 matching lines...) Expand all Loading... |
1036 | 1078 |
1037 void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) { | 1079 void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) { |
1038 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm); | 1080 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm); |
1039 } | 1081 } |
1040 | 1082 |
1041 void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) { | 1083 void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) { |
1042 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm); | 1084 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm); |
1043 } | 1085 } |
1044 | 1086 |
1045 | 1087 |
1046 void Assembler::EmitVFPsd(Condition cond, int32_t opcode, | 1088 void Assembler::EmitVFPsd(Condition cond, |
1047 SRegister sd, DRegister dm) { | 1089 int32_t opcode, |
| 1090 SRegister sd, |
| 1091 DRegister dm) { |
1048 ASSERT(TargetCPUFeatures::vfp_supported()); | 1092 ASSERT(TargetCPUFeatures::vfp_supported()); |
1049 ASSERT(sd != kNoSRegister); | 1093 ASSERT(sd != kNoSRegister); |
1050 ASSERT(dm != kNoDRegister); | 1094 ASSERT(dm != kNoDRegister); |
1051 ASSERT(cond != kNoCondition); | 1095 ASSERT(cond != kNoCondition); |
1052 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 1096 int32_t encoding = |
1053 B27 | B26 | B25 | B11 | B9 | opcode | | 1097 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
1054 ((static_cast<int32_t>(sd) & 1)*B22) | | 1098 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) | |
1055 ((static_cast<int32_t>(sd) >> 1)*B12) | | 1099 ((static_cast<int32_t>(sd) >> 1) * B12) | |
1056 ((static_cast<int32_t>(dm) >> 4)*B5) | | 1100 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
1057 (static_cast<int32_t>(dm) & 0xf); | |
1058 Emit(encoding); | 1101 Emit(encoding); |
1059 } | 1102 } |
1060 | 1103 |
1061 | 1104 |
1062 void Assembler::EmitVFPds(Condition cond, int32_t opcode, | 1105 void Assembler::EmitVFPds(Condition cond, |
1063 DRegister dd, SRegister sm) { | 1106 int32_t opcode, |
| 1107 DRegister dd, |
| 1108 SRegister sm) { |
1064 ASSERT(TargetCPUFeatures::vfp_supported()); | 1109 ASSERT(TargetCPUFeatures::vfp_supported()); |
1065 ASSERT(dd != kNoDRegister); | 1110 ASSERT(dd != kNoDRegister); |
1066 ASSERT(sm != kNoSRegister); | 1111 ASSERT(sm != kNoSRegister); |
1067 ASSERT(cond != kNoCondition); | 1112 ASSERT(cond != kNoCondition); |
1068 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 1113 int32_t encoding = |
1069 B27 | B26 | B25 | B11 | B9 | opcode | | 1114 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 | |
1070 ((static_cast<int32_t>(dd) >> 4)*B22) | | 1115 B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) | |
1071 ((static_cast<int32_t>(dd) & 0xf)*B12) | | 1116 ((static_cast<int32_t>(dd) & 0xf) * B12) | |
1072 ((static_cast<int32_t>(sm) & 1)*B5) | | 1117 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1); |
1073 (static_cast<int32_t>(sm) >> 1); | |
1074 Emit(encoding); | 1118 Emit(encoding); |
1075 } | 1119 } |
1076 | 1120 |
1077 | 1121 |
1078 void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { | 1122 void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { |
1079 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); | 1123 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); |
1080 } | 1124 } |
1081 | 1125 |
1082 | 1126 |
1083 void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { | 1127 void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1141 | 1185 |
1142 | 1186 |
1143 void Assembler::vcmpdz(DRegister dd, Condition cond) { | 1187 void Assembler::vcmpdz(DRegister dd, Condition cond) { |
1144 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0); | 1188 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0); |
1145 } | 1189 } |
1146 | 1190 |
1147 | 1191 |
1148 void Assembler::vmrs(Register rd, Condition cond) { | 1192 void Assembler::vmrs(Register rd, Condition cond) { |
1149 ASSERT(TargetCPUFeatures::vfp_supported()); | 1193 ASSERT(TargetCPUFeatures::vfp_supported()); |
1150 ASSERT(cond != kNoCondition); | 1194 ASSERT(cond != kNoCondition); |
1151 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 1195 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
1152 B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 | | 1196 B26 | B25 | B23 | B22 | B21 | B20 | B16 | |
1153 (static_cast<int32_t>(rd)*B12) | | 1197 (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4; |
1154 B11 | B9 | B4; | |
1155 Emit(encoding); | 1198 Emit(encoding); |
1156 } | 1199 } |
1157 | 1200 |
1158 | 1201 |
1159 void Assembler::vmstat(Condition cond) { | 1202 void Assembler::vmstat(Condition cond) { |
1160 vmrs(APSR, cond); | 1203 vmrs(APSR, cond); |
1161 } | 1204 } |
1162 | 1205 |
1163 | 1206 |
1164 static inline int ShiftOfOperandSize(OperandSize size) { | 1207 static inline int ShiftOfOperandSize(OperandSize size) { |
(...skipping 15 matching lines...) Expand all Loading... |
1180 default: | 1223 default: |
1181 UNREACHABLE(); | 1224 UNREACHABLE(); |
1182 break; | 1225 break; |
1183 } | 1226 } |
1184 | 1227 |
1185 UNREACHABLE(); | 1228 UNREACHABLE(); |
1186 return -1; | 1229 return -1; |
1187 } | 1230 } |
1188 | 1231 |
1189 | 1232 |
1190 void Assembler::EmitSIMDqqq(int32_t opcode, OperandSize size, | 1233 void Assembler::EmitSIMDqqq(int32_t opcode, |
1191 QRegister qd, QRegister qn, QRegister qm) { | 1234 OperandSize size, |
| 1235 QRegister qd, |
| 1236 QRegister qn, |
| 1237 QRegister qm) { |
1192 ASSERT(TargetCPUFeatures::neon_supported()); | 1238 ASSERT(TargetCPUFeatures::neon_supported()); |
1193 int sz = ShiftOfOperandSize(size); | 1239 int sz = ShiftOfOperandSize(size); |
1194 int32_t encoding = | 1240 int32_t encoding = |
1195 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | | 1241 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | B6 | |
1196 B25 | B6 | | |
1197 opcode | ((sz & 0x3) * B20) | | 1242 opcode | ((sz & 0x3) * B20) | |
1198 ((static_cast<int32_t>(qd * 2) >> 4)*B22) | | 1243 ((static_cast<int32_t>(qd * 2) >> 4) * B22) | |
1199 ((static_cast<int32_t>(qn * 2) & 0xf)*B16) | | 1244 ((static_cast<int32_t>(qn * 2) & 0xf) * B16) | |
1200 ((static_cast<int32_t>(qd * 2) & 0xf)*B12) | | 1245 ((static_cast<int32_t>(qd * 2) & 0xf) * B12) | |
1201 ((static_cast<int32_t>(qn * 2) >> 4)*B7) | | 1246 ((static_cast<int32_t>(qn * 2) >> 4) * B7) | |
1202 ((static_cast<int32_t>(qm * 2) >> 4)*B5) | | 1247 ((static_cast<int32_t>(qm * 2) >> 4) * B5) | |
1203 (static_cast<int32_t>(qm * 2) & 0xf); | 1248 (static_cast<int32_t>(qm * 2) & 0xf); |
1204 Emit(encoding); | 1249 Emit(encoding); |
1205 } | 1250 } |
1206 | 1251 |
1207 | 1252 |
1208 void Assembler::EmitSIMDddd(int32_t opcode, OperandSize size, | 1253 void Assembler::EmitSIMDddd(int32_t opcode, |
1209 DRegister dd, DRegister dn, DRegister dm) { | 1254 OperandSize size, |
| 1255 DRegister dd, |
| 1256 DRegister dn, |
| 1257 DRegister dm) { |
1210 ASSERT(TargetCPUFeatures::neon_supported()); | 1258 ASSERT(TargetCPUFeatures::neon_supported()); |
1211 int sz = ShiftOfOperandSize(size); | 1259 int sz = ShiftOfOperandSize(size); |
1212 int32_t encoding = | 1260 int32_t encoding = |
1213 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | | 1261 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | |
1214 B25 | | 1262 opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) | |
1215 opcode | ((sz & 0x3) * B20) | | 1263 ((static_cast<int32_t>(dn) & 0xf) * B16) | |
1216 ((static_cast<int32_t>(dd) >> 4)*B22) | | 1264 ((static_cast<int32_t>(dd) & 0xf) * B12) | |
1217 ((static_cast<int32_t>(dn) & 0xf)*B16) | | 1265 ((static_cast<int32_t>(dn) >> 4) * B7) | |
1218 ((static_cast<int32_t>(dd) & 0xf)*B12) | | 1266 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf); |
1219 ((static_cast<int32_t>(dn) >> 4)*B7) | | |
1220 ((static_cast<int32_t>(dm) >> 4)*B5) | | |
1221 (static_cast<int32_t>(dm) & 0xf); | |
1222 Emit(encoding); | 1267 Emit(encoding); |
1223 } | 1268 } |
1224 | 1269 |
1225 | 1270 |
1226 void Assembler::vmovq(QRegister qd, QRegister qm) { | 1271 void Assembler::vmovq(QRegister qd, QRegister qm) { |
1227 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm); | 1272 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm); |
1228 } | 1273 } |
1229 | 1274 |
1230 | 1275 |
1231 void Assembler::vaddqi(OperandSize sz, | 1276 void Assembler::vaddqi(OperandSize sz, |
1232 QRegister qd, QRegister qn, QRegister qm) { | 1277 QRegister qd, |
| 1278 QRegister qn, |
| 1279 QRegister qm) { |
1233 EmitSIMDqqq(B11, sz, qd, qn, qm); | 1280 EmitSIMDqqq(B11, sz, qd, qn, qm); |
1234 } | 1281 } |
1235 | 1282 |
1236 | 1283 |
1237 void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) { | 1284 void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) { |
1238 EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm); | 1285 EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm); |
1239 } | 1286 } |
1240 | 1287 |
1241 | 1288 |
1242 void Assembler::vsubqi(OperandSize sz, | 1289 void Assembler::vsubqi(OperandSize sz, |
1243 QRegister qd, QRegister qn, QRegister qm) { | 1290 QRegister qd, |
| 1291 QRegister qn, |
| 1292 QRegister qm) { |
1244 EmitSIMDqqq(B24 | B11, sz, qd, qn, qm); | 1293 EmitSIMDqqq(B24 | B11, sz, qd, qn, qm); |
1245 } | 1294 } |
1246 | 1295 |
1247 | 1296 |
1248 void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) { | 1297 void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) { |
1249 EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm); | 1298 EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm); |
1250 } | 1299 } |
1251 | 1300 |
1252 | 1301 |
1253 void Assembler::vmulqi(OperandSize sz, | 1302 void Assembler::vmulqi(OperandSize sz, |
1254 QRegister qd, QRegister qn, QRegister qm) { | 1303 QRegister qd, |
| 1304 QRegister qn, |
| 1305 QRegister qm) { |
1255 EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm); | 1306 EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm); |
1256 } | 1307 } |
1257 | 1308 |
1258 | 1309 |
1259 void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) { | 1310 void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) { |
1260 EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm); | 1311 EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm); |
1261 } | 1312 } |
1262 | 1313 |
1263 | 1314 |
1264 void Assembler::vshlqi(OperandSize sz, | 1315 void Assembler::vshlqi(OperandSize sz, |
1265 QRegister qd, QRegister qm, QRegister qn) { | 1316 QRegister qd, |
| 1317 QRegister qm, |
| 1318 QRegister qn) { |
1266 EmitSIMDqqq(B25 | B10, sz, qd, qn, qm); | 1319 EmitSIMDqqq(B25 | B10, sz, qd, qn, qm); |
1267 } | 1320 } |
1268 | 1321 |
1269 | 1322 |
1270 void Assembler::vshlqu(OperandSize sz, | 1323 void Assembler::vshlqu(OperandSize sz, |
1271 QRegister qd, QRegister qm, QRegister qn) { | 1324 QRegister qd, |
| 1325 QRegister qm, |
| 1326 QRegister qn) { |
1272 EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm); | 1327 EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm); |
1273 } | 1328 } |
1274 | 1329 |
1275 | 1330 |
1276 void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) { | 1331 void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) { |
1277 EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm); | 1332 EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm); |
1278 } | 1333 } |
1279 | 1334 |
1280 | 1335 |
1281 void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) { | 1336 void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) { |
(...skipping 20 matching lines...) Expand all Loading... |
1302 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm); | 1357 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm); |
1303 } | 1358 } |
1304 | 1359 |
1305 | 1360 |
1306 void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) { | 1361 void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) { |
1307 EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm); | 1362 EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm); |
1308 } | 1363 } |
1309 | 1364 |
1310 | 1365 |
1311 void Assembler::vabsqs(QRegister qd, QRegister qm) { | 1366 void Assembler::vabsqs(QRegister qd, QRegister qm) { |
1312 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, | 1367 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0, |
1313 qd, Q0, qm); | 1368 qm); |
1314 } | 1369 } |
1315 | 1370 |
1316 | 1371 |
1317 void Assembler::vnegqs(QRegister qd, QRegister qm) { | 1372 void Assembler::vnegqs(QRegister qd, QRegister qm) { |
1318 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord, | 1373 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord, |
1319 qd, Q0, qm); | 1374 qd, Q0, qm); |
1320 } | 1375 } |
1321 | 1376 |
1322 | 1377 |
1323 void Assembler::vrecpeqs(QRegister qd, QRegister qm) { | 1378 void Assembler::vrecpeqs(QRegister qd, QRegister qm) { |
1324 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, | 1379 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd, |
1325 qd, Q0, qm); | 1380 Q0, qm); |
1326 } | 1381 } |
1327 | 1382 |
1328 | 1383 |
1329 void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) { | 1384 void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) { |
1330 EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); | 1385 EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); |
1331 } | 1386 } |
1332 | 1387 |
1333 | 1388 |
1334 void Assembler::vrsqrteqs(QRegister qd, QRegister qm) { | 1389 void Assembler::vrsqrteqs(QRegister qd, QRegister qm) { |
1335 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, | 1390 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord, |
1336 kSWord, qd, Q0, qm); | 1391 qd, Q0, qm); |
1337 } | 1392 } |
1338 | 1393 |
1339 | 1394 |
1340 void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) { | 1395 void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) { |
1341 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); | 1396 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm); |
1342 } | 1397 } |
1343 | 1398 |
1344 | 1399 |
1345 void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) { | 1400 void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) { |
1346 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair)); | 1401 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair)); |
(...skipping 11 matching lines...) Expand all Loading... |
1358 ASSERT((idx >= 0) && (idx < 4)); | 1413 ASSERT((idx >= 0) && (idx < 4)); |
1359 code = 2 | (idx << 2); | 1414 code = 2 | (idx << 2); |
1360 break; | 1415 break; |
1361 } | 1416 } |
1362 case kWord: | 1417 case kWord: |
1363 case kUnsignedWord: { | 1418 case kUnsignedWord: { |
1364 ASSERT((idx >= 0) && (idx < 2)); | 1419 ASSERT((idx >= 0) && (idx < 2)); |
1365 code = 4 | (idx << 3); | 1420 code = 4 | (idx << 3); |
1366 break; | 1421 break; |
1367 } | 1422 } |
1368 default: { | 1423 default: { break; } |
1369 break; | |
1370 } | |
1371 } | 1424 } |
1372 | 1425 |
1373 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair, | 1426 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair, |
1374 static_cast<DRegister>(qd * 2), | 1427 static_cast<DRegister>(qd * 2), |
1375 static_cast<DRegister>(code & 0xf), | 1428 static_cast<DRegister>(code & 0xf), dm); |
1376 dm); | |
1377 } | 1429 } |
1378 | 1430 |
1379 | 1431 |
1380 void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) { | 1432 void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) { |
1381 ASSERT((len >= 1) && (len <= 4)); | 1433 ASSERT((len >= 1) && (len <= 4)); |
1382 EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm); | 1434 EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm); |
1383 } | 1435 } |
1384 | 1436 |
1385 | 1437 |
1386 void Assembler::vzipqw(QRegister qd, QRegister qm) { | 1438 void Assembler::vzipqw(QRegister qd, QRegister qm) { |
1387 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm); | 1439 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm); |
1388 } | 1440 } |
1389 | 1441 |
1390 | 1442 |
1391 void Assembler::vceqqi(OperandSize sz, | 1443 void Assembler::vceqqi(OperandSize sz, |
1392 QRegister qd, QRegister qn, QRegister qm) { | 1444 QRegister qd, |
| 1445 QRegister qn, |
| 1446 QRegister qm) { |
1393 EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm); | 1447 EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm); |
1394 } | 1448 } |
1395 | 1449 |
1396 | 1450 |
1397 void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) { | 1451 void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) { |
1398 EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm); | 1452 EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm); |
1399 } | 1453 } |
1400 | 1454 |
1401 | 1455 |
1402 void Assembler::vcgeqi(OperandSize sz, | 1456 void Assembler::vcgeqi(OperandSize sz, |
1403 QRegister qd, QRegister qn, QRegister qm) { | 1457 QRegister qd, |
| 1458 QRegister qn, |
| 1459 QRegister qm) { |
1404 EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm); | 1460 EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm); |
1405 } | 1461 } |
1406 | 1462 |
1407 | 1463 |
1408 void Assembler::vcugeqi(OperandSize sz, | 1464 void Assembler::vcugeqi(OperandSize sz, |
1409 QRegister qd, QRegister qn, QRegister qm) { | 1465 QRegister qd, |
| 1466 QRegister qn, |
| 1467 QRegister qm) { |
1410 EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm); | 1468 EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm); |
1411 } | 1469 } |
1412 | 1470 |
1413 | 1471 |
1414 void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) { | 1472 void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) { |
1415 EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm); | 1473 EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm); |
1416 } | 1474 } |
1417 | 1475 |
1418 | 1476 |
1419 void Assembler::vcgtqi(OperandSize sz, | 1477 void Assembler::vcgtqi(OperandSize sz, |
1420 QRegister qd, QRegister qn, QRegister qm) { | 1478 QRegister qd, |
| 1479 QRegister qn, |
| 1480 QRegister qm) { |
1421 EmitSIMDqqq(B9 | B8, sz, qd, qn, qm); | 1481 EmitSIMDqqq(B9 | B8, sz, qd, qn, qm); |
1422 } | 1482 } |
1423 | 1483 |
1424 | 1484 |
1425 void Assembler::vcugtqi(OperandSize sz, | 1485 void Assembler::vcugtqi(OperandSize sz, |
1426 QRegister qd, QRegister qn, QRegister qm) { | 1486 QRegister qd, |
| 1487 QRegister qn, |
| 1488 QRegister qm) { |
1427 EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm); | 1489 EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm); |
1428 } | 1490 } |
1429 | 1491 |
1430 | 1492 |
1431 void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) { | 1493 void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) { |
1432 EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm); | 1494 EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm); |
1433 } | 1495 } |
1434 | 1496 |
1435 | 1497 |
1436 void Assembler::bkpt(uint16_t imm16) { | 1498 void Assembler::bkpt(uint16_t imm16) { |
1437 Emit(BkptEncoding(imm16)); | 1499 Emit(BkptEncoding(imm16)); |
1438 } | 1500 } |
1439 | 1501 |
1440 | 1502 |
1441 void Assembler::b(Label* label, Condition cond) { | 1503 void Assembler::b(Label* label, Condition cond) { |
1442 EmitBranch(cond, label, false); | 1504 EmitBranch(cond, label, false); |
1443 } | 1505 } |
1444 | 1506 |
1445 | 1507 |
1446 void Assembler::bl(Label* label, Condition cond) { | 1508 void Assembler::bl(Label* label, Condition cond) { |
1447 EmitBranch(cond, label, true); | 1509 EmitBranch(cond, label, true); |
1448 } | 1510 } |
1449 | 1511 |
1450 | 1512 |
1451 void Assembler::bx(Register rm, Condition cond) { | 1513 void Assembler::bx(Register rm, Condition cond) { |
1452 ASSERT(rm != kNoRegister); | 1514 ASSERT(rm != kNoRegister); |
1453 ASSERT(cond != kNoCondition); | 1515 ASSERT(cond != kNoCondition); |
1454 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 1516 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
1455 B24 | B21 | (0xfff << 8) | B4 | | 1517 B21 | (0xfff << 8) | B4 | |
1456 (static_cast<int32_t>(rm) << kRmShift); | 1518 (static_cast<int32_t>(rm) << kRmShift); |
1457 Emit(encoding); | 1519 Emit(encoding); |
1458 } | 1520 } |
1459 | 1521 |
1460 | 1522 |
1461 void Assembler::blx(Register rm, Condition cond) { | 1523 void Assembler::blx(Register rm, Condition cond) { |
1462 ASSERT(rm != kNoRegister); | 1524 ASSERT(rm != kNoRegister); |
1463 ASSERT(cond != kNoCondition); | 1525 ASSERT(cond != kNoCondition); |
1464 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | | 1526 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 | |
1465 B24 | B21 | (0xfff << 8) | B5 | B4 | | 1527 B21 | (0xfff << 8) | B5 | B4 | |
1466 (static_cast<int32_t>(rm) << kRmShift); | 1528 (static_cast<int32_t>(rm) << kRmShift); |
1467 Emit(encoding); | 1529 Emit(encoding); |
1468 } | 1530 } |
1469 | 1531 |
1470 | 1532 |
1471 void Assembler::MarkExceptionHandler(Label* label) { | 1533 void Assembler::MarkExceptionHandler(Label* label) { |
1472 EmitType01(AL, 1, TST, 1, PC, R0, Operand(0)); | 1534 EmitType01(AL, 1, TST, 1, PC, R0, Operand(0)); |
1473 Label l; | 1535 Label l; |
1474 b(&l); | 1536 b(&l); |
1475 EmitBranch(AL, label, false); | 1537 EmitBranch(AL, label, false); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1523 Comment("CheckCodePointer"); | 1585 Comment("CheckCodePointer"); |
1524 Label cid_ok, instructions_ok; | 1586 Label cid_ok, instructions_ok; |
1525 Push(R0); | 1587 Push(R0); |
1526 Push(IP); | 1588 Push(IP); |
1527 CompareClassId(CODE_REG, kCodeCid, R0); | 1589 CompareClassId(CODE_REG, kCodeCid, R0); |
1528 b(&cid_ok, EQ); | 1590 b(&cid_ok, EQ); |
1529 bkpt(0); | 1591 bkpt(0); |
1530 Bind(&cid_ok); | 1592 Bind(&cid_ok); |
1531 | 1593 |
1532 const intptr_t offset = CodeSize() + Instr::kPCReadOffset + | 1594 const intptr_t offset = CodeSize() + Instr::kPCReadOffset + |
1533 Instructions::HeaderSize() - kHeapObjectTag; | 1595 Instructions::HeaderSize() - kHeapObjectTag; |
1534 mov(R0, Operand(PC)); | 1596 mov(R0, Operand(PC)); |
1535 AddImmediate(R0, R0, -offset); | 1597 AddImmediate(R0, R0, -offset); |
1536 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); | 1598 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); |
1537 cmp(R0, Operand(IP)); | 1599 cmp(R0, Operand(IP)); |
1538 b(&instructions_ok, EQ); | 1600 b(&instructions_ok, EQ); |
1539 bkpt(1); | 1601 bkpt(1); |
1540 Bind(&instructions_ok); | 1602 Bind(&instructions_ok); |
1541 Pop(IP); | 1603 Pop(IP); |
1542 Pop(R0); | 1604 Pop(R0); |
1543 #endif | 1605 #endif |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1588 // Load common VM constants from the thread. This works also in places where | 1650 // Load common VM constants from the thread. This works also in places where |
1589 // no constant pool is set up (e.g. intrinsic code). | 1651 // no constant pool is set up (e.g. intrinsic code). |
1590 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); | 1652 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); |
1591 } else if (object.IsSmi()) { | 1653 } else if (object.IsSmi()) { |
1592 // Relocation doesn't apply to Smis. | 1654 // Relocation doesn't apply to Smis. |
1593 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); | 1655 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); |
1594 } else if (CanLoadFromObjectPool(object)) { | 1656 } else if (CanLoadFromObjectPool(object)) { |
1595 // Make sure that class CallPattern is able to decode this load from the | 1657 // Make sure that class CallPattern is able to decode this load from the |
1596 // object pool. | 1658 // object pool. |
1597 const int32_t offset = ObjectPool::element_offset( | 1659 const int32_t offset = ObjectPool::element_offset( |
1598 is_unique ? object_pool_wrapper_.AddObject(object) | 1660 is_unique ? object_pool_wrapper_.AddObject(object) |
1599 : object_pool_wrapper_.FindObject(object)); | 1661 : object_pool_wrapper_.FindObject(object)); |
1600 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond); | 1662 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond); |
1601 } else { | 1663 } else { |
1602 UNREACHABLE(); | 1664 UNREACHABLE(); |
1603 } | 1665 } |
1604 } | 1666 } |
1605 | 1667 |
1606 | 1668 |
1607 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { | 1669 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { |
1608 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP); | 1670 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP); |
1609 } | 1671 } |
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1693 ASSERT(reg != FP); | 1755 ASSERT(reg != FP); |
1694 ASSERT(reg != PC); | 1756 ASSERT(reg != PC); |
1695 ASSERT((*used & (1 << reg)) == 0); | 1757 ASSERT((*used & (1 << reg)) == 0); |
1696 *used |= (1 << reg); | 1758 *used |= (1 << reg); |
1697 return reg; | 1759 return reg; |
1698 } | 1760 } |
1699 | 1761 |
1700 | 1762 |
1701 Register AllocateRegister(RegList* used) { | 1763 Register AllocateRegister(RegList* used) { |
1702 const RegList free = ~*used; | 1764 const RegList free = ~*used; |
1703 return (free == 0) ? | 1765 return (free == 0) |
1704 kNoRegister : | 1766 ? kNoRegister |
1705 UseRegister(static_cast<Register>(Utils::CountTrailingZeros(free)), used); | 1767 : UseRegister( |
| 1768 static_cast<Register>(Utils::CountTrailingZeros(free)), |
| 1769 used); |
1706 } | 1770 } |
1707 | 1771 |
1708 | 1772 |
1709 void Assembler::StoreIntoObject(Register object, | 1773 void Assembler::StoreIntoObject(Register object, |
1710 const Address& dest, | 1774 const Address& dest, |
1711 Register value, | 1775 Register value, |
1712 bool can_value_be_smi) { | 1776 bool can_value_be_smi) { |
1713 ASSERT(object != value); | 1777 ASSERT(object != value); |
1714 str(value, dest); | 1778 str(value, dest); |
1715 Label done; | 1779 Label done; |
(...skipping 18 matching lines...) Expand all Loading... |
1734 Bind(&done); | 1798 Bind(&done); |
1735 } | 1799 } |
1736 | 1800 |
1737 | 1801 |
1738 void Assembler::StoreIntoObjectOffset(Register object, | 1802 void Assembler::StoreIntoObjectOffset(Register object, |
1739 int32_t offset, | 1803 int32_t offset, |
1740 Register value, | 1804 Register value, |
1741 bool can_value_be_smi) { | 1805 bool can_value_be_smi) { |
1742 int32_t ignored = 0; | 1806 int32_t ignored = 0; |
1743 if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) { | 1807 if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) { |
1744 StoreIntoObject( | 1808 StoreIntoObject(object, FieldAddress(object, offset), value, |
1745 object, FieldAddress(object, offset), value, can_value_be_smi); | 1809 can_value_be_smi); |
1746 } else { | 1810 } else { |
1747 AddImmediate(IP, object, offset - kHeapObjectTag); | 1811 AddImmediate(IP, object, offset - kHeapObjectTag); |
1748 StoreIntoObject(object, Address(IP), value, can_value_be_smi); | 1812 StoreIntoObject(object, Address(IP), value, can_value_be_smi); |
1749 } | 1813 } |
1750 } | 1814 } |
1751 | 1815 |
1752 | 1816 |
1753 void Assembler::StoreIntoObjectNoBarrier(Register object, | 1817 void Assembler::StoreIntoObjectNoBarrier(Register object, |
1754 const Address& dest, | 1818 const Address& dest, |
1755 Register value) { | 1819 Register value) { |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1838 void Assembler::InitializeFieldsNoBarrierUnrolled(Register object, | 1902 void Assembler::InitializeFieldsNoBarrierUnrolled(Register object, |
1839 Register base, | 1903 Register base, |
1840 intptr_t begin_offset, | 1904 intptr_t begin_offset, |
1841 intptr_t end_offset, | 1905 intptr_t end_offset, |
1842 Register value_even, | 1906 Register value_even, |
1843 Register value_odd) { | 1907 Register value_odd) { |
1844 ASSERT(value_odd == value_even + 1); | 1908 ASSERT(value_odd == value_even + 1); |
1845 intptr_t current_offset = begin_offset; | 1909 intptr_t current_offset = begin_offset; |
1846 while (current_offset + kWordSize < end_offset) { | 1910 while (current_offset + kWordSize < end_offset) { |
1847 strd(value_even, value_odd, base, current_offset); | 1911 strd(value_even, value_odd, base, current_offset); |
1848 current_offset += 2*kWordSize; | 1912 current_offset += 2 * kWordSize; |
1849 } | 1913 } |
1850 while (current_offset < end_offset) { | 1914 while (current_offset < end_offset) { |
1851 str(value_even, Address(base, current_offset)); | 1915 str(value_even, Address(base, current_offset)); |
1852 current_offset += kWordSize; | 1916 current_offset += kWordSize; |
1853 } | 1917 } |
1854 #if defined(DEBUG) | 1918 #if defined(DEBUG) |
1855 Label done; | 1919 Label done; |
1856 StoreIntoObjectFilter(object, value_even, &done); | 1920 StoreIntoObjectFilter(object, value_even, &done); |
1857 StoreIntoObjectFilter(object, value_odd, &done); | 1921 StoreIntoObjectFilter(object, value_odd, &done); |
1858 Stop("Store buffer update is required"); | 1922 Stop("Store buffer update is required"); |
(...skipping 11 matching lines...) Expand all Loading... |
1870 Stop("New value must be Smi."); | 1934 Stop("New value must be Smi."); |
1871 Bind(&done); | 1935 Bind(&done); |
1872 #endif // defined(DEBUG) | 1936 #endif // defined(DEBUG) |
1873 str(value, dest); | 1937 str(value, dest); |
1874 } | 1938 } |
1875 | 1939 |
1876 | 1940 |
1877 void Assembler::LoadClassId(Register result, Register object, Condition cond) { | 1941 void Assembler::LoadClassId(Register result, Register object, Condition cond) { |
1878 ASSERT(RawObject::kClassIdTagPos == 16); | 1942 ASSERT(RawObject::kClassIdTagPos == 16); |
1879 ASSERT(RawObject::kClassIdTagSize == 16); | 1943 ASSERT(RawObject::kClassIdTagSize == 16); |
1880 const intptr_t class_id_offset = Object::tags_offset() + | 1944 const intptr_t class_id_offset = |
1881 RawObject::kClassIdTagPos / kBitsPerByte; | 1945 Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; |
1882 ldrh(result, FieldAddress(object, class_id_offset), cond); | 1946 ldrh(result, FieldAddress(object, class_id_offset), cond); |
1883 } | 1947 } |
1884 | 1948 |
1885 | 1949 |
1886 void Assembler::LoadClassById(Register result, Register class_id) { | 1950 void Assembler::LoadClassById(Register result, Register class_id) { |
1887 ASSERT(result != class_id); | 1951 ASSERT(result != class_id); |
1888 LoadIsolate(result); | 1952 LoadIsolate(result); |
1889 const intptr_t offset = | 1953 const intptr_t offset = |
1890 Isolate::class_table_offset() + ClassTable::table_offset(); | 1954 Isolate::class_table_offset() + ClassTable::table_offset(); |
1891 LoadFromOffset(kWord, result, result, offset); | 1955 LoadFromOffset(kWord, result, result, offset); |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1926 return Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset); | 1990 return Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset); |
1927 } | 1991 } |
1928 | 1992 |
1929 | 1993 |
1930 int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) { | 1994 int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) { |
1931 // The offset is off by 8 due to the way the ARM CPUs read PC. | 1995 // The offset is off by 8 due to the way the ARM CPUs read PC. |
1932 offset -= Instr::kPCReadOffset; | 1996 offset -= Instr::kPCReadOffset; |
1933 | 1997 |
1934 if (!CanEncodeBranchOffset(offset)) { | 1998 if (!CanEncodeBranchOffset(offset)) { |
1935 ASSERT(!use_far_branches()); | 1999 ASSERT(!use_far_branches()); |
1936 Thread::Current()->long_jump_base()->Jump( | 2000 Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error()); |
1937 1, Object::branch_offset_error()); | |
1938 } | 2001 } |
1939 | 2002 |
1940 // Properly preserve only the bits supported in the instruction. | 2003 // Properly preserve only the bits supported in the instruction. |
1941 offset >>= 2; | 2004 offset >>= 2; |
1942 offset &= kBranchOffsetMask; | 2005 offset &= kBranchOffsetMask; |
1943 return (inst & ~kBranchOffsetMask) | offset; | 2006 return (inst & ~kBranchOffsetMask) | offset; |
1944 } | 2007 } |
1945 | 2008 |
1946 | 2009 |
1947 int Assembler::DecodeBranchOffset(int32_t inst) { | 2010 int Assembler::DecodeBranchOffset(int32_t inst) { |
1948 // Sign-extend, left-shift by 2, then add 8. | 2011 // Sign-extend, left-shift by 2, then add 8. |
1949 return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset); | 2012 return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset); |
1950 } | 2013 } |
1951 | 2014 |
1952 | 2015 |
1953 static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) { | 2016 static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) { |
1954 int32_t offset = 0; | 2017 int32_t offset = 0; |
1955 offset |= (movt & 0xf0000) << 12; | 2018 offset |= (movt & 0xf0000) << 12; |
1956 offset |= (movt & 0xfff) << 16; | 2019 offset |= (movt & 0xfff) << 16; |
1957 offset |= (movw & 0xf0000) >> 4; | 2020 offset |= (movw & 0xf0000) >> 4; |
1958 offset |= movw & 0xfff; | 2021 offset |= movw & 0xfff; |
1959 return offset; | 2022 return offset; |
1960 } | 2023 } |
1961 | 2024 |
1962 | 2025 |
1963 static int32_t DecodeARMv6LoadImmediate(int32_t mov, int32_t or1, | 2026 static int32_t DecodeARMv6LoadImmediate(int32_t mov, |
1964 int32_t or2, int32_t or3) { | 2027 int32_t or1, |
| 2028 int32_t or2, |
| 2029 int32_t or3) { |
1965 int32_t offset = 0; | 2030 int32_t offset = 0; |
1966 offset |= (mov & 0xff) << 24; | 2031 offset |= (mov & 0xff) << 24; |
1967 offset |= (or1 & 0xff) << 16; | 2032 offset |= (or1 & 0xff) << 16; |
1968 offset |= (or2 & 0xff) << 8; | 2033 offset |= (or2 & 0xff) << 8; |
1969 offset |= (or3 & 0xff); | 2034 offset |= (or3 & 0xff); |
1970 return offset; | 2035 return offset; |
1971 } | 2036 } |
1972 | 2037 |
1973 | 2038 |
1974 class PatchFarBranch : public AssemblerFixup { | 2039 class PatchFarBranch : public AssemblerFixup { |
1975 public: | 2040 public: |
1976 PatchFarBranch() {} | 2041 PatchFarBranch() {} |
1977 | 2042 |
1978 void Process(const MemoryRegion& region, intptr_t position) { | 2043 void Process(const MemoryRegion& region, intptr_t position) { |
1979 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2044 const ARMVersion version = TargetCPUFeatures::arm_version(); |
1980 if ((version == ARMv5TE) || (version == ARMv6)) { | 2045 if ((version == ARMv5TE) || (version == ARMv6)) { |
1981 ProcessARMv6(region, position); | 2046 ProcessARMv6(region, position); |
1982 } else { | 2047 } else { |
1983 ASSERT(version == ARMv7); | 2048 ASSERT(version == ARMv7); |
1984 ProcessARMv7(region, position); | 2049 ProcessARMv7(region, position); |
1985 } | 2050 } |
1986 } | 2051 } |
1987 | 2052 |
1988 private: | 2053 private: |
1989 void ProcessARMv6(const MemoryRegion& region, intptr_t position) { | 2054 void ProcessARMv6(const MemoryRegion& region, intptr_t position) { |
1990 const int32_t mov = region.Load<int32_t>(position); | 2055 const int32_t mov = region.Load<int32_t>(position); |
1991 const int32_t or1 = region.Load<int32_t>(position + 1*Instr::kInstrSize); | 2056 const int32_t or1 = region.Load<int32_t>(position + 1 * Instr::kInstrSize); |
1992 const int32_t or2 = region.Load<int32_t>(position + 2*Instr::kInstrSize); | 2057 const int32_t or2 = region.Load<int32_t>(position + 2 * Instr::kInstrSize); |
1993 const int32_t or3 = region.Load<int32_t>(position + 3*Instr::kInstrSize); | 2058 const int32_t or3 = region.Load<int32_t>(position + 3 * Instr::kInstrSize); |
1994 const int32_t bx = region.Load<int32_t>(position + 4*Instr::kInstrSize); | 2059 const int32_t bx = region.Load<int32_t>(position + 4 * Instr::kInstrSize); |
1995 | 2060 |
1996 if (((mov & 0xffffff00) == 0xe3a0c400) && // mov IP, (byte3 rot 4) | 2061 if (((mov & 0xffffff00) == 0xe3a0c400) && // mov IP, (byte3 rot 4) |
1997 ((or1 & 0xffffff00) == 0xe38cc800) && // orr IP, IP, (byte2 rot 8) | 2062 ((or1 & 0xffffff00) == 0xe38cc800) && // orr IP, IP, (byte2 rot 8) |
1998 ((or2 & 0xffffff00) == 0xe38ccc00) && // orr IP, IP, (byte1 rot 12) | 2063 ((or2 & 0xffffff00) == 0xe38ccc00) && // orr IP, IP, (byte1 rot 12) |
1999 ((or3 & 0xffffff00) == 0xe38cc000)) { // orr IP, IP, byte0 | 2064 ((or3 & 0xffffff00) == 0xe38cc000)) { // orr IP, IP, byte0 |
2000 const int32_t offset = DecodeARMv6LoadImmediate(mov, or1, or2, or3); | 2065 const int32_t offset = DecodeARMv6LoadImmediate(mov, or1, or2, or3); |
2001 const int32_t dest = region.start() + offset; | 2066 const int32_t dest = region.start() + offset; |
2002 const int32_t dest0 = (dest & 0x000000ff); | 2067 const int32_t dest0 = (dest & 0x000000ff); |
2003 const int32_t dest1 = (dest & 0x0000ff00) >> 8; | 2068 const int32_t dest1 = (dest & 0x0000ff00) >> 8; |
2004 const int32_t dest2 = (dest & 0x00ff0000) >> 16; | 2069 const int32_t dest2 = (dest & 0x00ff0000) >> 16; |
2005 const int32_t dest3 = (dest & 0xff000000) >> 24; | 2070 const int32_t dest3 = (dest & 0xff000000) >> 24; |
2006 const int32_t patched_mov = 0xe3a0c400 | dest3; | 2071 const int32_t patched_mov = 0xe3a0c400 | dest3; |
2007 const int32_t patched_or1 = 0xe38cc800 | dest2; | 2072 const int32_t patched_or1 = 0xe38cc800 | dest2; |
2008 const int32_t patched_or2 = 0xe38ccc00 | dest1; | 2073 const int32_t patched_or2 = 0xe38ccc00 | dest1; |
2009 const int32_t patched_or3 = 0xe38cc000 | dest0; | 2074 const int32_t patched_or3 = 0xe38cc000 | dest0; |
2010 | 2075 |
2011 region.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov); | 2076 region.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov); |
2012 region.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1); | 2077 region.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1); |
2013 region.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2); | 2078 region.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2); |
2014 region.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3); | 2079 region.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3); |
2015 return; | 2080 return; |
2016 } | 2081 } |
2017 | 2082 |
2018 // If the offset loading instructions aren't there, we must have replaced | 2083 // If the offset loading instructions aren't there, we must have replaced |
2019 // the far branch with a near one, and so these instructions | 2084 // the far branch with a near one, and so these instructions |
2020 // should be NOPs. | 2085 // should be NOPs. |
2021 ASSERT((or1 == Instr::kNopInstruction) && | 2086 ASSERT((or1 == Instr::kNopInstruction) && (or2 == Instr::kNopInstruction) && |
2022 (or2 == Instr::kNopInstruction) && | 2087 (or3 == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); |
2023 (or3 == Instr::kNopInstruction) && | |
2024 (bx == Instr::kNopInstruction)); | |
2025 } | 2088 } |
2026 | 2089 |
2027 | 2090 |
2028 void ProcessARMv7(const MemoryRegion& region, intptr_t position) { | 2091 void ProcessARMv7(const MemoryRegion& region, intptr_t position) { |
2029 const int32_t movw = region.Load<int32_t>(position); | 2092 const int32_t movw = region.Load<int32_t>(position); |
2030 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize); | 2093 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize); |
2031 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize); | 2094 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2032 | 2095 |
2033 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high | 2096 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high |
2034 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low | 2097 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low |
2035 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw); | 2098 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw); |
2036 const int32_t dest = region.start() + offset; | 2099 const int32_t dest = region.start() + offset; |
2037 const uint16_t dest_high = Utils::High16Bits(dest); | 2100 const uint16_t dest_high = Utils::High16Bits(dest); |
2038 const uint16_t dest_low = Utils::Low16Bits(dest); | 2101 const uint16_t dest_low = Utils::Low16Bits(dest); |
2039 const int32_t patched_movt = | 2102 const int32_t patched_movt = |
2040 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff); | 2103 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff); |
2041 const int32_t patched_movw = | 2104 const int32_t patched_movw = |
2042 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff); | 2105 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff); |
2043 | 2106 |
2044 region.Store<int32_t>(position, patched_movw); | 2107 region.Store<int32_t>(position, patched_movw); |
2045 region.Store<int32_t>(position + Instr::kInstrSize, patched_movt); | 2108 region.Store<int32_t>(position + Instr::kInstrSize, patched_movt); |
2046 return; | 2109 return; |
2047 } | 2110 } |
2048 | 2111 |
2049 // If the offset loading instructions aren't there, we must have replaced | 2112 // If the offset loading instructions aren't there, we must have replaced |
2050 // the far branch with a near one, and so these instructions | 2113 // the far branch with a near one, and so these instructions |
2051 // should be NOPs. | 2114 // should be NOPs. |
2052 ASSERT((movt == Instr::kNopInstruction) && | 2115 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); |
2053 (bx == Instr::kNopInstruction)); | |
2054 } | 2116 } |
2055 | 2117 |
2056 virtual bool IsPointerOffset() const { return false; } | 2118 virtual bool IsPointerOffset() const { return false; } |
2057 }; | 2119 }; |
2058 | 2120 |
2059 | 2121 |
2060 void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) { | 2122 void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) { |
2061 buffer_.EmitFixup(new PatchFarBranch()); | 2123 buffer_.EmitFixup(new PatchFarBranch()); |
2062 LoadPatchableImmediate(IP, offset); | 2124 LoadPatchableImmediate(IP, offset); |
2063 if (link) { | 2125 if (link) { |
(...skipping 29 matching lines...) Expand all Loading... |
2093 void Assembler::BindARMv6(Label* label) { | 2155 void Assembler::BindARMv6(Label* label) { |
2094 ASSERT(!label->IsBound()); | 2156 ASSERT(!label->IsBound()); |
2095 intptr_t bound_pc = buffer_.Size(); | 2157 intptr_t bound_pc = buffer_.Size(); |
2096 while (label->IsLinked()) { | 2158 while (label->IsLinked()) { |
2097 const int32_t position = label->Position(); | 2159 const int32_t position = label->Position(); |
2098 int32_t dest = bound_pc - position; | 2160 int32_t dest = bound_pc - position; |
2099 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { | 2161 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { |
2100 // Far branches are enabled and we can't encode the branch offset. | 2162 // Far branches are enabled and we can't encode the branch offset. |
2101 | 2163 |
2102 // Grab instructions that load the offset. | 2164 // Grab instructions that load the offset. |
2103 const int32_t mov = | 2165 const int32_t mov = buffer_.Load<int32_t>(position); |
2104 buffer_.Load<int32_t>(position); | |
2105 const int32_t or1 = | 2166 const int32_t or1 = |
2106 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); | 2167 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); |
2107 const int32_t or2 = | 2168 const int32_t or2 = |
2108 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); | 2169 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2109 const int32_t or3 = | 2170 const int32_t or3 = |
2110 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); | 2171 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); |
2111 | 2172 |
2112 // Change from relative to the branch to relative to the assembler | 2173 // Change from relative to the branch to relative to the assembler |
2113 // buffer. | 2174 // buffer. |
2114 dest = buffer_.Size(); | 2175 dest = buffer_.Size(); |
2115 const int32_t dest0 = (dest & 0x000000ff); | 2176 const int32_t dest0 = (dest & 0x000000ff); |
2116 const int32_t dest1 = (dest & 0x0000ff00) >> 8; | 2177 const int32_t dest1 = (dest & 0x0000ff00) >> 8; |
2117 const int32_t dest2 = (dest & 0x00ff0000) >> 16; | 2178 const int32_t dest2 = (dest & 0x00ff0000) >> 16; |
2118 const int32_t dest3 = (dest & 0xff000000) >> 24; | 2179 const int32_t dest3 = (dest & 0xff000000) >> 24; |
2119 const int32_t patched_mov = 0xe3a0c400 | dest3; | 2180 const int32_t patched_mov = 0xe3a0c400 | dest3; |
2120 const int32_t patched_or1 = 0xe38cc800 | dest2; | 2181 const int32_t patched_or1 = 0xe38cc800 | dest2; |
2121 const int32_t patched_or2 = 0xe38ccc00 | dest1; | 2182 const int32_t patched_or2 = 0xe38ccc00 | dest1; |
2122 const int32_t patched_or3 = 0xe38cc000 | dest0; | 2183 const int32_t patched_or3 = 0xe38cc000 | dest0; |
2123 | 2184 |
2124 // Rewrite the instructions. | 2185 // Rewrite the instructions. |
2125 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov); | 2186 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov); |
2126 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1); | 2187 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1); |
2127 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2); | 2188 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2); |
2128 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3); | 2189 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3); |
2129 label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3); | 2190 label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3); |
2130 } else if (use_far_branches() && CanEncodeBranchOffset(dest)) { | 2191 } else if (use_far_branches() && CanEncodeBranchOffset(dest)) { |
2131 // Grab instructions that load the offset, and the branch. | 2192 // Grab instructions that load the offset, and the branch. |
2132 const int32_t mov = | 2193 const int32_t mov = buffer_.Load<int32_t>(position); |
2133 buffer_.Load<int32_t>(position); | |
2134 const int32_t or1 = | 2194 const int32_t or1 = |
2135 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); | 2195 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); |
2136 const int32_t or2 = | 2196 const int32_t or2 = |
2137 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); | 2197 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2138 const int32_t or3 = | 2198 const int32_t or3 = |
2139 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); | 2199 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); |
2140 const int32_t branch = | 2200 const int32_t branch = |
2141 buffer_.Load<int32_t>(position + 4 * Instr::kInstrSize); | 2201 buffer_.Load<int32_t>(position + 4 * Instr::kInstrSize); |
2142 | 2202 |
2143 // Grab the branch condition, and encode the link bit. | 2203 // Grab the branch condition, and encode the link bit. |
2144 const int32_t cond = branch & 0xf0000000; | 2204 const int32_t cond = branch & 0xf0000000; |
2145 const int32_t link = (branch & 0x20) << 19; | 2205 const int32_t link = (branch & 0x20) << 19; |
2146 | 2206 |
2147 // Encode the branch and the offset. | 2207 // Encode the branch and the offset. |
2148 const int32_t new_branch = cond | link | 0x0a000000; | 2208 const int32_t new_branch = cond | link | 0x0a000000; |
2149 const int32_t encoded = EncodeBranchOffset(dest, new_branch); | 2209 const int32_t encoded = EncodeBranchOffset(dest, new_branch); |
2150 | 2210 |
2151 // Write the encoded branch instruction followed by two nops. | 2211 // Write the encoded branch instruction followed by two nops. |
2152 buffer_.Store<int32_t>(position, encoded); | 2212 buffer_.Store<int32_t>(position, encoded); |
2153 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, | 2213 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, |
2154 Instr::kNopInstruction); | 2214 Instr::kNopInstruction); |
2155 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, | 2215 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, |
2156 Instr::kNopInstruction); | 2216 Instr::kNopInstruction); |
2157 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, | 2217 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, |
2158 Instr::kNopInstruction); | 2218 Instr::kNopInstruction); |
2159 buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, | 2219 buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, |
2160 Instr::kNopInstruction); | 2220 Instr::kNopInstruction); |
2161 | 2221 |
2162 label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3); | 2222 label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3); |
2163 } else { | 2223 } else { |
2164 int32_t next = buffer_.Load<int32_t>(position); | 2224 int32_t next = buffer_.Load<int32_t>(position); |
2165 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); | 2225 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); |
2166 buffer_.Store<int32_t>(position, encoded); | 2226 buffer_.Store<int32_t>(position, encoded); |
2167 label->position_ = Assembler::DecodeBranchOffset(next); | 2227 label->position_ = Assembler::DecodeBranchOffset(next); |
2168 } | 2228 } |
2169 } | 2229 } |
2170 label->BindTo(bound_pc); | 2230 label->BindTo(bound_pc); |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2213 | 2273 |
2214 // Grab the branch condition, and encode the link bit. | 2274 // Grab the branch condition, and encode the link bit. |
2215 const int32_t cond = branch & 0xf0000000; | 2275 const int32_t cond = branch & 0xf0000000; |
2216 const int32_t link = (branch & 0x20) << 19; | 2276 const int32_t link = (branch & 0x20) << 19; |
2217 | 2277 |
2218 // Encode the branch and the offset. | 2278 // Encode the branch and the offset. |
2219 const int32_t new_branch = cond | link | 0x0a000000; | 2279 const int32_t new_branch = cond | link | 0x0a000000; |
2220 const int32_t encoded = EncodeBranchOffset(dest, new_branch); | 2280 const int32_t encoded = EncodeBranchOffset(dest, new_branch); |
2221 | 2281 |
2222 // Write the encoded branch instruction followed by two nops. | 2282 // Write the encoded branch instruction followed by two nops. |
2223 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, | 2283 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded); |
2224 encoded); | |
2225 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, | 2284 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, |
2226 Instr::kNopInstruction); | 2285 Instr::kNopInstruction); |
2227 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, | 2286 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, |
2228 Instr::kNopInstruction); | 2287 Instr::kNopInstruction); |
2229 | 2288 |
2230 label->position_ = DecodeARMv7LoadImmediate(movt, movw); | 2289 label->position_ = DecodeARMv7LoadImmediate(movt, movw); |
2231 } else { | 2290 } else { |
2232 int32_t next = buffer_.Load<int32_t>(position); | 2291 int32_t next = buffer_.Load<int32_t>(position); |
2233 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); | 2292 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); |
2234 buffer_.Store<int32_t>(position, encoded); | 2293 buffer_.Store<int32_t>(position, encoded); |
2235 label->position_ = Assembler::DecodeBranchOffset(next); | 2294 label->position_ = Assembler::DecodeBranchOffset(next); |
2236 } | 2295 } |
2237 } | 2296 } |
2238 label->BindTo(bound_pc); | 2297 label->BindTo(bound_pc); |
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2361 return offset == 0; | 2420 return offset == 0; |
2362 } | 2421 } |
2363 default: { | 2422 default: { |
2364 UNREACHABLE(); | 2423 UNREACHABLE(); |
2365 return false; | 2424 return false; |
2366 } | 2425 } |
2367 } | 2426 } |
2368 } | 2427 } |
2369 | 2428 |
2370 | 2429 |
2371 bool Address::CanHoldImmediateOffset( | 2430 bool Address::CanHoldImmediateOffset(bool is_load, |
2372 bool is_load, intptr_t cid, int64_t offset) { | 2431 intptr_t cid, |
| 2432 int64_t offset) { |
2373 int32_t offset_mask = 0; | 2433 int32_t offset_mask = 0; |
2374 if (is_load) { | 2434 if (is_load) { |
2375 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask); | 2435 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask); |
2376 } else { | 2436 } else { |
2377 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask); | 2437 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask); |
2378 } | 2438 } |
2379 } | 2439 } |
2380 | 2440 |
2381 | 2441 |
2382 void Assembler::Push(Register rd, Condition cond) { | 2442 void Assembler::Push(Register rd, Condition cond) { |
(...skipping 16 matching lines...) Expand all Loading... |
2399 } | 2459 } |
2400 | 2460 |
2401 | 2461 |
2402 void Assembler::MoveRegister(Register rd, Register rm, Condition cond) { | 2462 void Assembler::MoveRegister(Register rd, Register rm, Condition cond) { |
2403 if (rd != rm) { | 2463 if (rd != rm) { |
2404 mov(rd, Operand(rm), cond); | 2464 mov(rd, Operand(rm), cond); |
2405 } | 2465 } |
2406 } | 2466 } |
2407 | 2467 |
2408 | 2468 |
2409 void Assembler::Lsl(Register rd, Register rm, const Operand& shift_imm, | 2469 void Assembler::Lsl(Register rd, |
| 2470 Register rm, |
| 2471 const Operand& shift_imm, |
2410 Condition cond) { | 2472 Condition cond) { |
2411 ASSERT(shift_imm.type() == 1); | 2473 ASSERT(shift_imm.type() == 1); |
2412 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted. | 2474 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted. |
2413 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond); | 2475 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond); |
2414 } | 2476 } |
2415 | 2477 |
2416 | 2478 |
2417 void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) { | 2479 void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) { |
2418 mov(rd, Operand(rm, LSL, rs), cond); | 2480 mov(rd, Operand(rm, LSL, rs), cond); |
2419 } | 2481 } |
2420 | 2482 |
2421 | 2483 |
2422 void Assembler::Lsr(Register rd, Register rm, const Operand& shift_imm, | 2484 void Assembler::Lsr(Register rd, |
| 2485 Register rm, |
| 2486 const Operand& shift_imm, |
2423 Condition cond) { | 2487 Condition cond) { |
2424 ASSERT(shift_imm.type() == 1); | 2488 ASSERT(shift_imm.type() == 1); |
2425 uint32_t shift = shift_imm.encoding(); | 2489 uint32_t shift = shift_imm.encoding(); |
2426 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted. | 2490 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted. |
2427 if (shift == 32) { | 2491 if (shift == 32) { |
2428 shift = 0; // Comply to UAL syntax. | 2492 shift = 0; // Comply to UAL syntax. |
2429 } | 2493 } |
2430 mov(rd, Operand(rm, LSR, shift), cond); | 2494 mov(rd, Operand(rm, LSR, shift), cond); |
2431 } | 2495 } |
2432 | 2496 |
2433 | 2497 |
2434 void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) { | 2498 void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) { |
2435 mov(rd, Operand(rm, LSR, rs), cond); | 2499 mov(rd, Operand(rm, LSR, rs), cond); |
2436 } | 2500 } |
2437 | 2501 |
2438 | 2502 |
2439 void Assembler::Asr(Register rd, Register rm, const Operand& shift_imm, | 2503 void Assembler::Asr(Register rd, |
| 2504 Register rm, |
| 2505 const Operand& shift_imm, |
2440 Condition cond) { | 2506 Condition cond) { |
2441 ASSERT(shift_imm.type() == 1); | 2507 ASSERT(shift_imm.type() == 1); |
2442 uint32_t shift = shift_imm.encoding(); | 2508 uint32_t shift = shift_imm.encoding(); |
2443 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. | 2509 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2444 if (shift == 32) { | 2510 if (shift == 32) { |
2445 shift = 0; // Comply to UAL syntax. | 2511 shift = 0; // Comply to UAL syntax. |
2446 } | 2512 } |
2447 mov(rd, Operand(rm, ASR, shift), cond); | 2513 mov(rd, Operand(rm, ASR, shift), cond); |
2448 } | 2514 } |
2449 | 2515 |
2450 | 2516 |
2451 void Assembler::Asrs(Register rd, Register rm, const Operand& shift_imm, | 2517 void Assembler::Asrs(Register rd, |
| 2518 Register rm, |
| 2519 const Operand& shift_imm, |
2452 Condition cond) { | 2520 Condition cond) { |
2453 ASSERT(shift_imm.type() == 1); | 2521 ASSERT(shift_imm.type() == 1); |
2454 uint32_t shift = shift_imm.encoding(); | 2522 uint32_t shift = shift_imm.encoding(); |
2455 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. | 2523 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2456 if (shift == 32) { | 2524 if (shift == 32) { |
2457 shift = 0; // Comply to UAL syntax. | 2525 shift = 0; // Comply to UAL syntax. |
2458 } | 2526 } |
2459 movs(rd, Operand(rm, ASR, shift), cond); | 2527 movs(rd, Operand(rm, ASR, shift), cond); |
2460 } | 2528 } |
2461 | 2529 |
2462 | 2530 |
2463 void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) { | 2531 void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) { |
2464 mov(rd, Operand(rm, ASR, rs), cond); | 2532 mov(rd, Operand(rm, ASR, rs), cond); |
2465 } | 2533 } |
2466 | 2534 |
2467 | 2535 |
2468 void Assembler::Ror(Register rd, Register rm, const Operand& shift_imm, | 2536 void Assembler::Ror(Register rd, |
| 2537 Register rm, |
| 2538 const Operand& shift_imm, |
2469 Condition cond) { | 2539 Condition cond) { |
2470 ASSERT(shift_imm.type() == 1); | 2540 ASSERT(shift_imm.type() == 1); |
2471 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction. | 2541 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction. |
2472 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond); | 2542 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond); |
2473 } | 2543 } |
2474 | 2544 |
2475 | 2545 |
2476 void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) { | 2546 void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) { |
2477 mov(rd, Operand(rm, ROR, rs), cond); | 2547 mov(rd, Operand(rm, ROR, rs), cond); |
2478 } | 2548 } |
(...skipping 24 matching lines...) Expand all Loading... |
2503 | 2573 |
2504 | 2574 |
2505 void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) { | 2575 void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) { |
2506 ASSERT(qm != QTMP); | 2576 ASSERT(qm != QTMP); |
2507 ASSERT(qd != QTMP); | 2577 ASSERT(qd != QTMP); |
2508 | 2578 |
2509 // Reciprocal square root estimate. | 2579 // Reciprocal square root estimate. |
2510 vrsqrteqs(qd, qm); | 2580 vrsqrteqs(qd, qm); |
2511 // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2. | 2581 // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2. |
2512 // First step. | 2582 // First step. |
2513 vmulqs(QTMP, qd, qd); // QTMP <- xn^2 | 2583 vmulqs(QTMP, qd, qd); // QTMP <- xn^2 |
2514 vrsqrtsqs(QTMP, qm, QTMP); // QTMP <- (3 - Q1*QTMP) / 2. | 2584 vrsqrtsqs(QTMP, qm, QTMP); // QTMP <- (3 - Q1*QTMP) / 2. |
2515 vmulqs(qd, qd, QTMP); // xn+1 <- xn * QTMP | 2585 vmulqs(qd, qd, QTMP); // xn+1 <- xn * QTMP |
2516 // Second step. | 2586 // Second step. |
2517 vmulqs(QTMP, qd, qd); | 2587 vmulqs(QTMP, qd, qd); |
2518 vrsqrtsqs(QTMP, qm, QTMP); | 2588 vrsqrtsqs(QTMP, qm, QTMP); |
2519 vmulqs(qd, qd, QTMP); | 2589 vmulqs(qd, qd, QTMP); |
2520 } | 2590 } |
2521 | 2591 |
2522 | 2592 |
2523 void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) { | 2593 void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) { |
2524 ASSERT(temp != QTMP); | 2594 ASSERT(temp != QTMP); |
2525 ASSERT(qm != QTMP); | 2595 ASSERT(qm != QTMP); |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2618 | 2688 |
2619 | 2689 |
2620 void Assembler::BranchLinkOffset(Register base, int32_t offset) { | 2690 void Assembler::BranchLinkOffset(Register base, int32_t offset) { |
2621 ASSERT(base != PC); | 2691 ASSERT(base != PC); |
2622 ASSERT(base != IP); | 2692 ASSERT(base != IP); |
2623 LoadFromOffset(kWord, IP, base, offset); | 2693 LoadFromOffset(kWord, IP, base, offset); |
2624 blx(IP); // Use blx instruction so that the return branch prediction works. | 2694 blx(IP); // Use blx instruction so that the return branch prediction works. |
2625 } | 2695 } |
2626 | 2696 |
2627 | 2697 |
2628 void Assembler::LoadPatchableImmediate( | 2698 void Assembler::LoadPatchableImmediate(Register rd, |
2629 Register rd, int32_t value, Condition cond) { | 2699 int32_t value, |
| 2700 Condition cond) { |
2630 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2701 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2631 if ((version == ARMv5TE) || (version == ARMv6)) { | 2702 if ((version == ARMv5TE) || (version == ARMv6)) { |
2632 // This sequence is patched in a few places, and should remain fixed. | 2703 // This sequence is patched in a few places, and should remain fixed. |
2633 const uint32_t byte0 = (value & 0x000000ff); | 2704 const uint32_t byte0 = (value & 0x000000ff); |
2634 const uint32_t byte1 = (value & 0x0000ff00) >> 8; | 2705 const uint32_t byte1 = (value & 0x0000ff00) >> 8; |
2635 const uint32_t byte2 = (value & 0x00ff0000) >> 16; | 2706 const uint32_t byte2 = (value & 0x00ff0000) >> 16; |
2636 const uint32_t byte3 = (value & 0xff000000) >> 24; | 2707 const uint32_t byte3 = (value & 0xff000000) >> 24; |
2637 mov(rd, Operand(4, byte3), cond); | 2708 mov(rd, Operand(4, byte3), cond); |
2638 orr(rd, rd, Operand(8, byte2), cond); | 2709 orr(rd, rd, Operand(8, byte2), cond); |
2639 orr(rd, rd, Operand(12, byte1), cond); | 2710 orr(rd, rd, Operand(12, byte1), cond); |
2640 orr(rd, rd, Operand(byte0), cond); | 2711 orr(rd, rd, Operand(byte0), cond); |
2641 } else { | 2712 } else { |
2642 ASSERT(version == ARMv7); | 2713 ASSERT(version == ARMv7); |
2643 const uint16_t value_low = Utils::Low16Bits(value); | 2714 const uint16_t value_low = Utils::Low16Bits(value); |
2644 const uint16_t value_high = Utils::High16Bits(value); | 2715 const uint16_t value_high = Utils::High16Bits(value); |
2645 movw(rd, value_low, cond); | 2716 movw(rd, value_low, cond); |
2646 movt(rd, value_high, cond); | 2717 movt(rd, value_high, cond); |
2647 } | 2718 } |
2648 } | 2719 } |
2649 | 2720 |
2650 | 2721 |
2651 void Assembler::LoadDecodableImmediate( | 2722 void Assembler::LoadDecodableImmediate(Register rd, |
2652 Register rd, int32_t value, Condition cond) { | 2723 int32_t value, |
| 2724 Condition cond) { |
2653 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2725 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2654 if ((version == ARMv5TE) || (version == ARMv6)) { | 2726 if ((version == ARMv5TE) || (version == ARMv6)) { |
2655 if (constant_pool_allowed()) { | 2727 if (constant_pool_allowed()) { |
2656 const int32_t offset = Array::element_offset(FindImmediate(value)); | 2728 const int32_t offset = Array::element_offset(FindImmediate(value)); |
2657 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); | 2729 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); |
2658 } else { | 2730 } else { |
2659 LoadPatchableImmediate(rd, value, cond); | 2731 LoadPatchableImmediate(rd, value, cond); |
2660 } | 2732 } |
2661 } else { | 2733 } else { |
2662 ASSERT(version == ARMv7); | 2734 ASSERT(version == ARMv7); |
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2846 void Assembler::StoreMultipleDToOffset(DRegister first, | 2918 void Assembler::StoreMultipleDToOffset(DRegister first, |
2847 intptr_t count, | 2919 intptr_t count, |
2848 Register base, | 2920 Register base, |
2849 int32_t offset) { | 2921 int32_t offset) { |
2850 ASSERT(base != IP); | 2922 ASSERT(base != IP); |
2851 AddImmediate(IP, base, offset); | 2923 AddImmediate(IP, base, offset); |
2852 vstmd(IA, IP, first, count); | 2924 vstmd(IA, IP, first, count); |
2853 } | 2925 } |
2854 | 2926 |
2855 | 2927 |
2856 void Assembler::CopyDoubleField( | 2928 void Assembler::CopyDoubleField(Register dst, |
2857 Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) { | 2929 Register src, |
| 2930 Register tmp1, |
| 2931 Register tmp2, |
| 2932 DRegister dtmp) { |
2858 if (TargetCPUFeatures::vfp_supported()) { | 2933 if (TargetCPUFeatures::vfp_supported()) { |
2859 LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag); | 2934 LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag); |
2860 StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag); | 2935 StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag); |
2861 } else { | 2936 } else { |
2862 LoadFromOffset(kWord, tmp1, src, | 2937 LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag); |
2863 Double::value_offset() - kHeapObjectTag); | |
2864 LoadFromOffset(kWord, tmp2, src, | 2938 LoadFromOffset(kWord, tmp2, src, |
2865 Double::value_offset() + kWordSize - kHeapObjectTag); | 2939 Double::value_offset() + kWordSize - kHeapObjectTag); |
2866 StoreToOffset(kWord, tmp1, dst, | 2940 StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag); |
2867 Double::value_offset() - kHeapObjectTag); | |
2868 StoreToOffset(kWord, tmp2, dst, | 2941 StoreToOffset(kWord, tmp2, dst, |
2869 Double::value_offset() + kWordSize - kHeapObjectTag); | 2942 Double::value_offset() + kWordSize - kHeapObjectTag); |
2870 } | 2943 } |
2871 } | 2944 } |
2872 | 2945 |
2873 | 2946 |
2874 void Assembler::CopyFloat32x4Field( | 2947 void Assembler::CopyFloat32x4Field(Register dst, |
2875 Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) { | 2948 Register src, |
| 2949 Register tmp1, |
| 2950 Register tmp2, |
| 2951 DRegister dtmp) { |
2876 if (TargetCPUFeatures::neon_supported()) { | 2952 if (TargetCPUFeatures::neon_supported()) { |
2877 LoadMultipleDFromOffset(dtmp, 2, src, | 2953 LoadMultipleDFromOffset(dtmp, 2, src, |
2878 Float32x4::value_offset() - kHeapObjectTag); | 2954 Float32x4::value_offset() - kHeapObjectTag); |
2879 StoreMultipleDToOffset(dtmp, 2, dst, | 2955 StoreMultipleDToOffset(dtmp, 2, dst, |
2880 Float32x4::value_offset() - kHeapObjectTag); | 2956 Float32x4::value_offset() - kHeapObjectTag); |
2881 } else { | 2957 } else { |
2882 LoadFromOffset(kWord, tmp1, src, | 2958 LoadFromOffset( |
| 2959 kWord, tmp1, src, |
2883 (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag); | 2960 (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag); |
2884 LoadFromOffset(kWord, tmp2, src, | 2961 LoadFromOffset( |
| 2962 kWord, tmp2, src, |
2885 (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag); | 2963 (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag); |
2886 StoreToOffset(kWord, tmp1, dst, | 2964 StoreToOffset(kWord, tmp1, dst, |
2887 (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag); | 2965 (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag); |
2888 StoreToOffset(kWord, tmp2, dst, | 2966 StoreToOffset(kWord, tmp2, dst, |
2889 (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag); | 2967 (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag); |
2890 | 2968 |
2891 LoadFromOffset(kWord, tmp1, src, | 2969 LoadFromOffset( |
| 2970 kWord, tmp1, src, |
2892 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 2971 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
2893 LoadFromOffset(kWord, tmp2, src, | 2972 LoadFromOffset( |
| 2973 kWord, tmp2, src, |
2894 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2974 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2895 StoreToOffset(kWord, tmp1, dst, | 2975 StoreToOffset(kWord, tmp1, dst, |
2896 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 2976 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
2897 StoreToOffset(kWord, tmp2, dst, | 2977 StoreToOffset(kWord, tmp2, dst, |
2898 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2978 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2899 } | 2979 } |
2900 } | 2980 } |
2901 | 2981 |
2902 | 2982 |
2903 void Assembler::CopyFloat64x2Field( | 2983 void Assembler::CopyFloat64x2Field(Register dst, |
2904 Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) { | 2984 Register src, |
| 2985 Register tmp1, |
| 2986 Register tmp2, |
| 2987 DRegister dtmp) { |
2905 if (TargetCPUFeatures::neon_supported()) { | 2988 if (TargetCPUFeatures::neon_supported()) { |
2906 LoadMultipleDFromOffset(dtmp, 2, src, | 2989 LoadMultipleDFromOffset(dtmp, 2, src, |
2907 Float64x2::value_offset() - kHeapObjectTag); | 2990 Float64x2::value_offset() - kHeapObjectTag); |
2908 StoreMultipleDToOffset(dtmp, 2, dst, | 2991 StoreMultipleDToOffset(dtmp, 2, dst, |
2909 Float64x2::value_offset() - kHeapObjectTag); | 2992 Float64x2::value_offset() - kHeapObjectTag); |
2910 } else { | 2993 } else { |
2911 LoadFromOffset(kWord, tmp1, src, | 2994 LoadFromOffset( |
| 2995 kWord, tmp1, src, |
2912 (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag); | 2996 (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag); |
2913 LoadFromOffset(kWord, tmp2, src, | 2997 LoadFromOffset( |
| 2998 kWord, tmp2, src, |
2914 (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag); | 2999 (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag); |
2915 StoreToOffset(kWord, tmp1, dst, | 3000 StoreToOffset(kWord, tmp1, dst, |
2916 (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag); | 3001 (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag); |
2917 StoreToOffset(kWord, tmp2, dst, | 3002 StoreToOffset(kWord, tmp2, dst, |
2918 (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag); | 3003 (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag); |
2919 | 3004 |
2920 LoadFromOffset(kWord, tmp1, src, | 3005 LoadFromOffset( |
| 3006 kWord, tmp1, src, |
2921 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 3007 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
2922 LoadFromOffset(kWord, tmp2, src, | 3008 LoadFromOffset( |
| 3009 kWord, tmp2, src, |
2923 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 3010 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2924 StoreToOffset(kWord, tmp1, dst, | 3011 StoreToOffset(kWord, tmp1, dst, |
2925 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 3012 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
2926 StoreToOffset(kWord, tmp2, dst, | 3013 StoreToOffset(kWord, tmp2, dst, |
2927 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 3014 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2928 } | 3015 } |
2929 } | 3016 } |
2930 | 3017 |
2931 | 3018 |
// rd := rd + value (in place). Delegates to the three-operand form with
// |rd| as both source and destination; may clobber IP for immediates that
// cannot be encoded directly (see that overload).
void Assembler::AddImmediate(Register rd, int32_t value, Condition cond) {
  AddImmediate(rd, rd, value, cond);
}
2935 | 3022 |
2936 | 3023 |
2937 void Assembler::AddImmediate(Register rd, Register rn, int32_t value, | 3024 void Assembler::AddImmediate(Register rd, |
| 3025 Register rn, |
| 3026 int32_t value, |
2938 Condition cond) { | 3027 Condition cond) { |
2939 if (value == 0) { | 3028 if (value == 0) { |
2940 if (rd != rn) { | 3029 if (rd != rn) { |
2941 mov(rd, Operand(rn), cond); | 3030 mov(rd, Operand(rn), cond); |
2942 } | 3031 } |
2943 return; | 3032 return; |
2944 } | 3033 } |
2945 // We prefer to select the shorter code sequence rather than selecting add for | 3034 // We prefer to select the shorter code sequence rather than selecting add for |
2946 // positive values and sub for negatives ones, which would slightly improve | 3035 // positive values and sub for negatives ones, which would slightly improve |
2947 // the readability of generated code for some constants. | 3036 // the readability of generated code for some constants. |
(...skipping 11 matching lines...) Expand all Loading... |
2959 mvn(IP, o, cond); | 3048 mvn(IP, o, cond); |
2960 sub(rd, rn, Operand(IP), cond); | 3049 sub(rd, rn, Operand(IP), cond); |
2961 } else { | 3050 } else { |
2962 LoadDecodableImmediate(IP, value, cond); | 3051 LoadDecodableImmediate(IP, value, cond); |
2963 add(rd, rn, Operand(IP), cond); | 3052 add(rd, rn, Operand(IP), cond); |
2964 } | 3053 } |
2965 } | 3054 } |
2966 } | 3055 } |
2967 | 3056 |
2968 | 3057 |
// rd := rn + value, updating the condition flags so callers can detect
// overflow (e.g. via a following branch on VS). Falls back to materializing
// the immediate in IP, so |rn| must not be IP on that path.
void Assembler::AddImmediateSetFlags(Register rd,
                                     Register rn,
                                     int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    adds(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    // Adding value is rewritten as subtracting -value, which flips the
    // meaning of the overflow flag for kMinInt32 (whose negation is itself).
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    subs(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      // ~value is encodable: build value in IP with a single mvn, then add.
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      // ~(-value) is encodable: build -value in IP, then subtract it.
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else {
      // Last resort: full immediate load (possibly from the object pool).
      LoadDecodableImmediate(IP, value, cond);
      adds(rd, rn, Operand(IP), cond);
    }
  }
}
2993 | 3084 |
2994 | 3085 |
// rd := rn - value, updating the condition flags for overflow detection.
// Mirror image of AddImmediateSetFlags above: each encodable form of the
// immediate (value, -value, ~value, ~(-value)) picks the matching
// subs/adds pairing. Clobbers IP on the non-encodable path, so |rn| must
// not be IP there.
void Assembler::SubImmediateSetFlags(Register rd,
                                     Register rn,
                                     int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    subs(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    // Subtracting value becomes adding -value; invalid for kMinInt32
    // because its negation is itself.
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    adds(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      // ~value is encodable: build value in IP with mvn, then subtract.
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      // ~(-value) is encodable: build -value in IP, then add it.
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else {
      // Last resort: full immediate load (possibly from the object pool).
      LoadDecodableImmediate(IP, value, cond);
      subs(rd, rn, Operand(IP), cond);
    }
  }
}
3019 | 3112 |
3020 | 3113 |
3021 void Assembler::AndImmediate(Register rd, Register rs, int32_t imm, | 3114 void Assembler::AndImmediate(Register rd, |
| 3115 Register rs, |
| 3116 int32_t imm, |
3022 Condition cond) { | 3117 Condition cond) { |
3023 Operand o; | 3118 Operand o; |
3024 if (Operand::CanHold(imm, &o)) { | 3119 if (Operand::CanHold(imm, &o)) { |
3025 and_(rd, rs, Operand(o), cond); | 3120 and_(rd, rs, Operand(o), cond); |
3026 } else { | 3121 } else { |
3027 LoadImmediate(TMP, imm, cond); | 3122 LoadImmediate(TMP, imm, cond); |
3028 and_(rd, rs, Operand(TMP), cond); | 3123 and_(rd, rs, Operand(TMP), cond); |
3029 } | 3124 } |
3030 } | 3125 } |
3031 | 3126 |
(...skipping 13 matching lines...) Expand all Loading... |
3045 void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) { | 3140 void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) { |
3046 Operand o; | 3141 Operand o; |
3047 if (Operand::CanHold(imm, &o)) { | 3142 if (Operand::CanHold(imm, &o)) { |
3048 tst(rn, o, cond); | 3143 tst(rn, o, cond); |
3049 } else { | 3144 } else { |
3050 LoadImmediate(IP, imm); | 3145 LoadImmediate(IP, imm); |
3051 tst(rn, Operand(IP), cond); | 3146 tst(rn, Operand(IP), cond); |
3052 } | 3147 } |
3053 } | 3148 } |
3054 | 3149 |
3055 void Assembler::IntegerDivide(Register result, Register left, Register right, | 3150 void Assembler::IntegerDivide(Register result, |
3056 DRegister tmpl, DRegister tmpr) { | 3151 Register left, |
| 3152 Register right, |
| 3153 DRegister tmpl, |
| 3154 DRegister tmpr) { |
3057 ASSERT(tmpl != tmpr); | 3155 ASSERT(tmpl != tmpr); |
3058 if (TargetCPUFeatures::integer_division_supported()) { | 3156 if (TargetCPUFeatures::integer_division_supported()) { |
3059 sdiv(result, left, right); | 3157 sdiv(result, left, right); |
3060 } else { | 3158 } else { |
3061 ASSERT(TargetCPUFeatures::vfp_supported()); | 3159 ASSERT(TargetCPUFeatures::vfp_supported()); |
3062 SRegister stmpl = static_cast<SRegister>(2 * tmpl); | 3160 SRegister stmpl = static_cast<SRegister>(2 * tmpl); |
3063 SRegister stmpr = static_cast<SRegister>(2 * tmpr); | 3161 SRegister stmpr = static_cast<SRegister>(2 * tmpr); |
3064 vmovsr(stmpl, left); | 3162 vmovsr(stmpl, left); |
3065 vcvtdi(tmpl, stmpl); // left is in tmpl. | 3163 vcvtdi(tmpl, stmpl); // left is in tmpl. |
3066 vmovsr(stmpr, right); | 3164 vmovsr(stmpr, right); |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3144 | 3242 |
3145 ReserveAlignedFrameSpace(frame_space); | 3243 ReserveAlignedFrameSpace(frame_space); |
3146 } | 3244 } |
3147 | 3245 |
3148 | 3246 |
3149 void Assembler::LeaveCallRuntimeFrame() { | 3247 void Assembler::LeaveCallRuntimeFrame() { |
3150 // SP might have been modified to reserve space for arguments | 3248 // SP might have been modified to reserve space for arguments |
3151 // and ensure proper alignment of the stack frame. | 3249 // and ensure proper alignment of the stack frame. |
3152 // We need to restore it before restoring registers. | 3250 // We need to restore it before restoring registers. |
3153 const intptr_t kPushedFpuRegisterSize = | 3251 const intptr_t kPushedFpuRegisterSize = |
3154 TargetCPUFeatures::vfp_supported() ? | 3252 TargetCPUFeatures::vfp_supported() |
3155 kDartVolatileFpuRegCount * kFpuRegisterSize : 0; | 3253 ? kDartVolatileFpuRegCount * kFpuRegisterSize |
| 3254 : 0; |
3156 | 3255 |
3157 COMPILE_ASSERT(PP < FP); | 3256 COMPILE_ASSERT(PP < FP); |
3158 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0); | 3257 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0); |
3159 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile, | 3258 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile, |
3160 // it is pushed ahead of FP. | 3259 // it is pushed ahead of FP. |
3161 const intptr_t kPushedRegistersSize = | 3260 const intptr_t kPushedRegistersSize = |
3162 kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize; | 3261 kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize; |
3163 AddImmediate(SP, FP, -kPushedRegistersSize); | 3262 AddImmediate(SP, FP, -kPushedRegistersSize); |
3164 | 3263 |
3165 // Restore all volatile FPU registers. | 3264 // Restore all volatile FPU registers. |
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3275 Register temp_reg, | 3374 Register temp_reg, |
3276 Label* trace) { | 3375 Label* trace) { |
3277 LoadAllocationStatsAddress(temp_reg, cid); | 3376 LoadAllocationStatsAddress(temp_reg, cid); |
3278 const uword state_offset = ClassHeapStats::state_offset(); | 3377 const uword state_offset = ClassHeapStats::state_offset(); |
3279 ldr(temp_reg, Address(temp_reg, state_offset)); | 3378 ldr(temp_reg, Address(temp_reg, state_offset)); |
3280 tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask())); | 3379 tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask())); |
3281 b(trace, NE); | 3380 b(trace, NE); |
3282 } | 3381 } |
3283 | 3382 |
3284 | 3383 |
3285 void Assembler::LoadAllocationStatsAddress(Register dest, | 3384 void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) { |
3286 intptr_t cid) { | |
3287 ASSERT(dest != kNoRegister); | 3385 ASSERT(dest != kNoRegister); |
3288 ASSERT(dest != TMP); | 3386 ASSERT(dest != TMP); |
3289 ASSERT(cid > 0); | 3387 ASSERT(cid > 0); |
3290 const intptr_t class_offset = ClassTable::ClassOffsetFor(cid); | 3388 const intptr_t class_offset = ClassTable::ClassOffsetFor(cid); |
3291 LoadIsolate(dest); | 3389 LoadIsolate(dest); |
3292 intptr_t table_offset = | 3390 intptr_t table_offset = |
3293 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 3391 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
3294 ldr(dest, Address(dest, table_offset)); | 3392 ldr(dest, Address(dest, table_offset)); |
3295 AddImmediate(dest, class_offset); | 3393 AddImmediate(dest, class_offset); |
3296 } | 3394 } |
3297 | 3395 |
3298 | 3396 |
3299 void Assembler::IncrementAllocationStats(Register stats_addr_reg, | 3397 void Assembler::IncrementAllocationStats(Register stats_addr_reg, |
3300 intptr_t cid, | 3398 intptr_t cid, |
3301 Heap::Space space) { | 3399 Heap::Space space) { |
3302 ASSERT(stats_addr_reg != kNoRegister); | 3400 ASSERT(stats_addr_reg != kNoRegister); |
3303 ASSERT(stats_addr_reg != TMP); | 3401 ASSERT(stats_addr_reg != TMP); |
3304 ASSERT(cid > 0); | 3402 ASSERT(cid > 0); |
3305 const uword count_field_offset = (space == Heap::kNew) ? | 3403 const uword count_field_offset = |
3306 ClassHeapStats::allocated_since_gc_new_space_offset() : | 3404 (space == Heap::kNew) |
3307 ClassHeapStats::allocated_since_gc_old_space_offset(); | 3405 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
| 3406 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
3308 const Address& count_address = Address(stats_addr_reg, count_field_offset); | 3407 const Address& count_address = Address(stats_addr_reg, count_field_offset); |
3309 ldr(TMP, count_address); | 3408 ldr(TMP, count_address); |
3310 AddImmediate(TMP, 1); | 3409 AddImmediate(TMP, 1); |
3311 str(TMP, count_address); | 3410 str(TMP, count_address); |
3312 } | 3411 } |
3313 | 3412 |
3314 | 3413 |
3315 void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg, | 3414 void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg, |
3316 Register size_reg, | 3415 Register size_reg, |
3317 Heap::Space space) { | 3416 Heap::Space space) { |
3318 ASSERT(stats_addr_reg != kNoRegister); | 3417 ASSERT(stats_addr_reg != kNoRegister); |
3319 ASSERT(stats_addr_reg != TMP); | 3418 ASSERT(stats_addr_reg != TMP); |
3320 const uword count_field_offset = (space == Heap::kNew) ? | 3419 const uword count_field_offset = |
3321 ClassHeapStats::allocated_since_gc_new_space_offset() : | 3420 (space == Heap::kNew) |
3322 ClassHeapStats::allocated_since_gc_old_space_offset(); | 3421 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
3323 const uword size_field_offset = (space == Heap::kNew) ? | 3422 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
3324 ClassHeapStats::allocated_size_since_gc_new_space_offset() : | 3423 const uword size_field_offset = |
3325 ClassHeapStats::allocated_size_since_gc_old_space_offset(); | 3424 (space == Heap::kNew) |
| 3425 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() |
| 3426 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); |
3326 const Address& count_address = Address(stats_addr_reg, count_field_offset); | 3427 const Address& count_address = Address(stats_addr_reg, count_field_offset); |
3327 const Address& size_address = Address(stats_addr_reg, size_field_offset); | 3428 const Address& size_address = Address(stats_addr_reg, size_field_offset); |
3328 ldr(TMP, count_address); | 3429 ldr(TMP, count_address); |
3329 AddImmediate(TMP, 1); | 3430 AddImmediate(TMP, 1); |
3330 str(TMP, count_address); | 3431 str(TMP, count_address); |
3331 ldr(TMP, size_address); | 3432 ldr(TMP, size_address); |
3332 add(TMP, TMP, Operand(size_reg)); | 3433 add(TMP, TMP, Operand(size_reg)); |
3333 str(TMP, size_address); | 3434 str(TMP, size_address); |
3334 } | 3435 } |
3335 #endif // !PRODUCT | 3436 #endif // !PRODUCT |
3336 | 3437 |
3337 | 3438 |
3338 void Assembler::TryAllocate(const Class& cls, | 3439 void Assembler::TryAllocate(const Class& cls, |
3339 Label* failure, | 3440 Label* failure, |
3340 Register instance_reg, | 3441 Register instance_reg, |
3341 Register temp_reg) { | 3442 Register temp_reg) { |
3342 ASSERT(failure != NULL); | 3443 ASSERT(failure != NULL); |
3343 if (FLAG_inline_alloc) { | 3444 if (FLAG_inline_alloc) { |
3344 ASSERT(instance_reg != temp_reg); | 3445 ASSERT(instance_reg != temp_reg); |
3345 ASSERT(temp_reg != IP); | 3446 ASSERT(temp_reg != IP); |
3346 const intptr_t instance_size = cls.instance_size(); | 3447 const intptr_t instance_size = cls.instance_size(); |
3347 ASSERT(instance_size != 0); | 3448 ASSERT(instance_size != 0); |
3348 // If this allocation is traced, program will jump to failure path | 3449 // If this allocation is traced, program will jump to failure path |
3349 // (i.e. the allocation stub) which will allocate the object and trace the | 3450 // (i.e. the allocation stub) which will allocate the object and trace the |
3350 // allocation call site. | 3451 // allocation call site. |
3351 NOT_IN_PRODUCT( | 3452 NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), temp_reg, failure)); |
3352 MaybeTraceAllocation(cls.id(), temp_reg, failure)); | |
3353 Heap::Space space = Heap::kNew; | 3453 Heap::Space space = Heap::kNew; |
3354 ldr(temp_reg, Address(THR, Thread::heap_offset())); | 3454 ldr(temp_reg, Address(THR, Thread::heap_offset())); |
3355 ldr(instance_reg, Address(temp_reg, Heap::TopOffset(space))); | 3455 ldr(instance_reg, Address(temp_reg, Heap::TopOffset(space))); |
3356 // TODO(koda): Protect against unsigned overflow here. | 3456 // TODO(koda): Protect against unsigned overflow here. |
3357 AddImmediateSetFlags(instance_reg, instance_reg, instance_size); | 3457 AddImmediateSetFlags(instance_reg, instance_reg, instance_size); |
3358 | 3458 |
3359 // instance_reg: potential next object start. | 3459 // instance_reg: potential next object start. |
3360 ldr(IP, Address(temp_reg, Heap::EndOffset(space))); | 3460 ldr(IP, Address(temp_reg, Heap::EndOffset(space))); |
3361 cmp(IP, Operand(instance_reg)); | 3461 cmp(IP, Operand(instance_reg)); |
3362 // fail if heap end unsigned less than or equal to instance_reg. | 3462 // fail if heap end unsigned less than or equal to instance_reg. |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3455 | 3555 |
3456 Address Assembler::ElementAddressForIntIndex(bool is_load, | 3556 Address Assembler::ElementAddressForIntIndex(bool is_load, |
3457 bool is_external, | 3557 bool is_external, |
3458 intptr_t cid, | 3558 intptr_t cid, |
3459 intptr_t index_scale, | 3559 intptr_t index_scale, |
3460 Register array, | 3560 Register array, |
3461 intptr_t index, | 3561 intptr_t index, |
3462 Register temp) { | 3562 Register temp) { |
3463 const int64_t offset_base = | 3563 const int64_t offset_base = |
3464 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 3564 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3465 const int64_t offset = offset_base + | 3565 const int64_t offset = |
3466 static_cast<int64_t>(index) * index_scale; | 3566 offset_base + static_cast<int64_t>(index) * index_scale; |
3467 ASSERT(Utils::IsInt(32, offset)); | 3567 ASSERT(Utils::IsInt(32, offset)); |
3468 | 3568 |
3469 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) { | 3569 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) { |
3470 return Address(array, static_cast<int32_t>(offset)); | 3570 return Address(array, static_cast<int32_t>(offset)); |
3471 } else { | 3571 } else { |
3472 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)); | 3572 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)); |
3473 AddImmediate(temp, array, static_cast<int32_t>(offset_base)); | 3573 AddImmediate(temp, array, static_cast<int32_t>(offset_base)); |
3474 return Address(temp, static_cast<int32_t>(offset - offset_base)); | 3574 return Address(temp, static_cast<int32_t>(offset - offset_base)); |
3475 } | 3575 } |
3476 } | 3576 } |
3477 | 3577 |
3478 | 3578 |
3479 void Assembler::LoadElementAddressForIntIndex(Register address, | 3579 void Assembler::LoadElementAddressForIntIndex(Register address, |
3480 bool is_load, | 3580 bool is_load, |
3481 bool is_external, | 3581 bool is_external, |
3482 intptr_t cid, | 3582 intptr_t cid, |
3483 intptr_t index_scale, | 3583 intptr_t index_scale, |
3484 Register array, | 3584 Register array, |
3485 intptr_t index) { | 3585 intptr_t index) { |
3486 const int64_t offset_base = | 3586 const int64_t offset_base = |
3487 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 3587 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3488 const int64_t offset = offset_base + | 3588 const int64_t offset = |
3489 static_cast<int64_t>(index) * index_scale; | 3589 offset_base + static_cast<int64_t>(index) * index_scale; |
3490 ASSERT(Utils::IsInt(32, offset)); | 3590 ASSERT(Utils::IsInt(32, offset)); |
3491 AddImmediate(address, array, offset); | 3591 AddImmediate(address, array, offset); |
3492 } | 3592 } |
3493 | 3593 |
3494 | 3594 |
// Returns an Address for the element of |array| selected by the smi-tagged
// register |index|. Cheap cases return a register-offset Address directly;
// otherwise the scaled index is added into a base register first (IP for
// loads; for stores |index| itself is reused as the base and is therefore
// clobbered). Any displacement the access mode cannot encode is folded into
// the base via AddImmediate.
Address Assembler::ElementAddressForRegIndex(bool is_load,
                                             bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             Register index) {
  // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
  // A shift of -1 means the element scale is smaller than the smi tag
  // scale, so the index must be shifted right by one instead.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
  int32_t offset =
      is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
  const OperandSize size = Address::OperandSizeFor(cid);
  ASSERT(array != IP);
  ASSERT(index != IP);
  // For stores, IP may be needed by the caller, so reuse |index| as the
  // base register (clobbering it); loads can use IP freely.
  const Register base = is_load ? IP : index;
  if ((offset != 0) || (size == kSWord) || (size == kDWord) ||
      (size == kRegList)) {
    // Register-offset addressing cannot carry an extra displacement, and
    // FP/multi-register accesses do not support register offsets at all:
    // compute array + scaled index into |base| first.
    if (shift < 0) {
      ASSERT(shift == -1);
      add(base, array, Operand(index, ASR, 1));
    } else {
      add(base, array, Operand(index, LSL, shift));
    }
  } else {
    // No displacement needed: use register-offset addressing directly.
    if (shift < 0) {
      ASSERT(shift == -1);
      return Address(array, index, ASR, 1);
    } else {
      return Address(array, index, LSL, shift);
    }
  }
  // If the remaining displacement does not fit this access's immediate
  // range, fold the out-of-range part into |base| and keep the rest.
  int32_t offset_mask = 0;
  if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) ||
      (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) {
    AddImmediate(base, offset & ~offset_mask);
    offset = offset & offset_mask;
  }
  return Address(base, offset);
}
3537 | 3633 |
3538 | 3634 |
3539 void Assembler::LoadElementAddressForRegIndex(Register address, | 3635 void Assembler::LoadElementAddressForRegIndex(Register address, |
3540 bool is_load, | 3636 bool is_load, |
3541 bool is_external, | 3637 bool is_external, |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3605 Lsr(tmp, src, Operand(8)); | 3701 Lsr(tmp, src, Operand(8)); |
3606 strb(tmp, Address(addr, 1)); | 3702 strb(tmp, Address(addr, 1)); |
3607 Lsr(tmp, src, Operand(16)); | 3703 Lsr(tmp, src, Operand(16)); |
3608 strb(tmp, Address(addr, 2)); | 3704 strb(tmp, Address(addr, 2)); |
3609 Lsr(tmp, src, Operand(24)); | 3705 Lsr(tmp, src, Operand(24)); |
3610 strb(tmp, Address(addr, 3)); | 3706 strb(tmp, Address(addr, 3)); |
3611 } | 3707 } |
3612 | 3708 |
3613 | 3709 |
// Printable names for the ARM core registers, indexed by Register value.
// Entries 9 and 10 use the Dart VM role names ("ctx", "pp") rather than
// "r9"/"r10"; 11-15 use the standard ABI aliases fp/ip/sp/lr/pc.
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "ctx", "pp", "fp", "ip", "sp", "lr", "pc",
};
3618 | 3714 |
3619 | 3715 |
// Returns the printable name for |reg| (see cpu_reg_names above).
// Asserts that the register value is within the core-register range.
const char* Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}
3624 | 3720 |
3625 | 3721 |
// Printable names for the FPU (quad) registers, indexed by FpuRegister.
// q8-q15 exist only when the target has the 32-D-register VFPv3 variant,
// matching the conditional definition of kNumberOfFpuRegisters.
static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
    "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
#if defined(VFPv3_D32)
    "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
#endif
};
3632 | 3728 |
3633 | 3729 |
// Returns the printable name for |reg| (see fpu_reg_names above).
// Asserts that the register value is within the FPU-register range.
const char* Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}
3638 | 3734 |
3639 } // namespace dart | 3735 } // namespace dart |
3640 | 3736 |
3641 #endif // defined TARGET_ARCH_ARM | 3737 #endif // defined TARGET_ARCH_ARM |
OLD | NEW |