OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 | 9 |
10 namespace dart { | 10 namespace dart { |
11 | 11 |
12 DEFINE_FLAG(bool, print_stop_message, true, "Print stop message."); | 12 DEFINE_FLAG(bool, print_stop_message, true, "Print stop message."); |
13 DEFINE_FLAG(bool, code_comments, false, | 13 |
14 "Include comments into code and disassembly"); | 14 |
// Instruction encoding bits.
//
// NOTE: several names intentionally share a bit position because the same
// bit means different things in different instruction classes (e.g. bit 20
// is L for load/store instructions but S for data-processing instructions).
enum {
  H = 1 << 5,   // halfword (or byte)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  // Bn == bit n of the instruction word; used when composing raw encodings.
  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // ldrex/strex register field encodings.
  kLdExRnShift = 16,
  kLdExRtShift = 12,
  kStrExRnShift = 16,
  kStrExRdShift = 12,
  kStrExRtShift = 0,
};
| 61 |
| 62 |
// Re-encodes this address for addressing mode 3 (halfword / signed byte /
// doubleword loads and stores): the 8-bit immediate offset is split into a
// high nibble at bits 11..8 and a low nibble at bits 3..0.
uint32_t Address::encoding3() const {
  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t offset = encoding_ & offset_mask;
  ASSERT(offset < 256);  // Mode 3 only has an 8-bit immediate.
  return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
}
| 69 |
| 70 |
| 71 uint32_t Address::vencoding() const { |
| 72 const uint32_t offset_mask = (1 << 12) - 1; |
| 73 uint32_t offset = encoding_ & offset_mask; |
| 74 ASSERT(offset < (1 << 10)); // In the range 0 to +1020. |
| 75 ASSERT(Utils::Utils::IsAligned(offset, 2)); // Multiple of 4. |
| 76 int mode = encoding_ & ((8|4|1) << 21); |
| 77 ASSERT((mode == Offset) || (mode == NegOffset)); |
| 78 uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2); |
| 79 if (mode == Offset) { |
| 80 vencoding |= 1 << 23; |
| 81 } |
| 82 return vencoding; |
| 83 } |
| 84 |
| 85 |
// Appends one 32-bit instruction word to the assembler buffer.
void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
| 90 |
| 91 |
// Emits a data-processing instruction (ARM "type 0/1" format):
// cond | type | opcode | S | Rn | Rd | shifter operand.
// 'set_cc' is the S bit (1 == update the condition flags).
void Assembler::EmitType01(Condition cond,
                           int type,
                           Opcode opcode,
                           int set_cc,
                           Register rn,
                           Register rd,
                           ShifterOperand so) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding();
  Emit(encoding);
}
| 110 |
| 111 |
// Emits a branch instruction (ARM "type 5" format): cond | 101 | link |
// 24-bit signed word offset, which EncodeBranchOffset folds into 'encoding'.
void Assembler::EmitType5(Condition cond, int offset, bool link) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(Assembler::EncodeBranchOffset(offset, encoding));
}
| 119 |
| 120 |
// Emits a single-register word/byte load or store (addressing mode 2).
// 'load' selects the L bit, 'byte' the B bit; the address supplies the
// base register, offset and indexing bits via ad.encoding().
void Assembler::EmitMemOp(Condition cond,
                          bool load,
                          bool byte,
                          Register rd,
                          Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}
| 136 |
| 137 |
// Emits a halfword / signed-byte / doubleword load or store (addressing
// mode 3).  'mode' carries the instruction-specific bits (L, H, B6, B7,
// B4, ...); ad.encoding3() splits the 8-bit immediate into two nibbles.
void Assembler::EmitMemOpAddressMode3(Condition cond,
                                      int32_t mode,
                                      Register rd,
                                      Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B22 |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}
| 151 |
| 152 |
// Emits a block transfer (LDM/STM): 'am' supplies the P/U/W addressing
// bits, 'regs' is the 16-bit register list.
void Assembler::EmitMultiMemOp(Condition cond,
                               BlockAddressMode am,
                               bool load,
                               Register base,
                               RegList regs) {
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
| 168 |
| 169 |
// Emits a shift-by-immediate, encoded as a MOV with the shift amount in the
// shifter-operand immediate field.  Requires an immediate shifter operand
// (so.type() == 1).
void Assembler::EmitShiftImmediate(Condition cond,
                                   Shift opcode,
                                   Register rd,
                                   Register rm,
                                   ShifterOperand so) {
  ASSERT(cond != kNoCondition);
  ASSERT(so.type() == 1);  // Immediate shifter operand only.
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
| 185 |
| 186 |
// Emits a shift-by-register, encoded as a MOV with the shift amount taken
// from a register (B4 set).  Requires a register shifter operand
// (so.type() == 0).
void Assembler::EmitShiftRegister(Condition cond,
                                  Shift opcode,
                                  Register rd,
                                  Register rm,
                                  ShifterOperand so) {
  ASSERT(cond != kNoCondition);
  ASSERT(so.type() == 0);  // Register shifter operand only.
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
| 203 |
| 204 |
// Emits a (conditional) branch to 'label'.  For a bound label the
// PC-relative offset is emitted directly; for an unbound label the branch
// sites are chained through the instruction's offset field and resolved
// when the label is bound.
void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    EmitType5(cond, label->Position() - buffer_.Size(), link);
  } else {
    int position = buffer_.Size();
    // Use the offset field of the branch instruction for linking the sites.
    EmitType5(cond, label->position_, link);
    label->LinkTo(position);
  }
}
| 215 |
| 216 |
// Data-processing instruction wrappers.  Each one forwards to EmitType01
// with its ARM opcode; the '1' vs '0' fourth argument is the S bit
// (update condition flags).  Comparison instructions (tst/teq/cmp/cmn)
// always set flags and have no destination; R0 fills the unused field.

void Assembler::and_(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
}


void Assembler::eor(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
}


void Assembler::sub(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
}

void Assembler::rsb(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
}

void Assembler::rsbs(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
}


void Assembler::add(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
}


void Assembler::adds(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
}


void Assembler::subs(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
}


void Assembler::adc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
}


void Assembler::sbc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
}


void Assembler::rsc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
}


void Assembler::tst(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
}


void Assembler::teq(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
}


void Assembler::cmp(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
}


void Assembler::cmn(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
}


void Assembler::orr(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
}


void Assembler::orrs(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
}


// MOV and MVN take no first operand; R0 fills the unused Rn field.
void Assembler::mov(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
}


void Assembler::movs(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
}


void Assembler::bic(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
}


void Assembler::mvn(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
}


void Assembler::mvns(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
}
| 337 |
| 338 |
// CLZ: count leading zeros of rm into rd.  The 0xf fields are the
// should-be-one filler fields of the encoding; rd/rm may not be PC.
void Assembler::clz(Register rd, Register rm, Condition cond) {
  ASSERT(rd != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(rd != PC);
  ASSERT(rm != PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}
| 351 |
| 352 |
// MOVW: load a 16-bit immediate into the low half of rd (upper half
// zeroed).  The immediate is split into a 4-bit imm4 (bits 19..16) and a
// 12-bit imm12 (bits 11..0).
void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}


// MOVT: load a 16-bit immediate into the high half of rd, leaving the low
// half unchanged.  Same imm4/imm12 split as movw, with B22 distinguishing
// the instruction.
void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
| 369 |
| 370 |
// Emits a multiply-class instruction.  The parameter names follow the
// ENCODING fields (rd/rn/rm/rs); callers pass their assembler-level
// registers in encoding order -- see the comments at each call site.
void Assembler::EmitMulOp(Condition cond, int32_t opcode,
                          Register rd, Register rn,
                          Register rm, Register rs) {
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(rs != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode |
                     (static_cast<int32_t>(cond) << kConditionShift) |
                     (static_cast<int32_t>(rn) << kRnShift) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (static_cast<int32_t>(rs) << kRsShift) |
                     B7 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
| 388 |
| 389 |
// Multiply instruction wrappers.  Note the argument reordering: the
// assembler-level registers are passed in the ENCODING's field order, as
// the per-function comments spell out.

// MUL: rd := rn * rm (low 32 bits).
void Assembler::mul(Register rd, Register rn,
                    Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}


// MLA: rd := rn * rm + ra (B21 is the accumulate bit).
void Assembler::mla(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}


// MLS: rd := ra - rn * rm.
void Assembler::mls(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
}


// UMULL: (rd_hi:rd_lo) := rn * rm, unsigned 64-bit result.
void Assembler::umull(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}
| 416 |
| 417 |
// Single-register load/store wrappers.  Word/byte forms use addressing
// mode 2 (EmitMemOp: load flag, byte flag); halfword, signed and
// doubleword forms use addressing mode 3 (EmitMemOpAddressMode3) with the
// per-instruction selector bits.

void Assembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}


void Assembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}


void Assembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}


void Assembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}


void Assembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}


void Assembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}


void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}


void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}


// Doubleword forms require an even rd (checked below); the encoding
// transfers a register pair.
void Assembler::ldrd(Register rd, Address ad, Condition cond) {
  ASSERT((rd % 2) == 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
}


void Assembler::strd(Register rd, Address ad, Condition cond) {
  ASSERT((rd % 2) == 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
}
| 468 |
| 469 |
// LDM: load multiple registers from consecutive memory at 'base'.
void Assembler::ldm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  EmitMultiMemOp(cond, am, true, base, regs);
}


// STM: store multiple registers to consecutive memory at 'base'.
void Assembler::stm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  EmitMultiMemOp(cond, am, false, base, regs);
}
| 480 |
| 481 |
// LDREX: exclusive load of [rn] into rt, marking the address for a
// following strex.  The trailing B11..B0 bits are the fixed filler fields
// of the encoding.
void Assembler::ldrex(Register rt, Register rn, Condition cond) {
  ASSERT(rn != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}


// STREX: exclusive store of rt to [rn]; rd receives 0 on success, 1 if the
// exclusive monitor was lost.
void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
  ASSERT(rn != kNoRegister);
  ASSERT(rd != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
| 511 |
| 512 |
// CLREX: clear the local exclusive-access monitor.  Unconditional
// (kSpecialCondition, cond field 0b1111).
void Assembler::clrex() {
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}


// NOP (conditional form).
void Assembler::nop(Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}
| 526 |
| 527 |
// VFP <-> core register transfer instructions.  S registers are split into
// a 4-bit index and a low "odd half" bit (sn >> 1 and sn & 1); D registers
// into a 4-bit index and a high bit (dm & 0xf and dm >> 4).  Multiplying by
// Bn places a field at bit n.  The B20 (L) bit distinguishes the
// VFP-to-core direction from core-to-VFP.

// VMOV sn, rt: core register to single-precision register.
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}


// VMOV rt, sn: single-precision register to core register.
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}


// VMOV sm, sm+1, rt, rt2: two core registers to a pair of S registers.
// sm may not be S31 since the pair (sm, sm+1) must exist.
void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


// VMOV rt, rt2, sm, sm+1: a pair of S registers to two distinct core
// registers.
void Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                        Condition cond) {
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


// VMOV dm, rt, rt2: two core registers to a double-precision register
// (B8 selects the double-precision form).
void Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


// VMOV rt, rt2, dm: a double-precision register to two distinct core
// registers.
void Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                        Condition cond) {
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
| 640 |
| 641 |
// VFP load/store instructions (VLDR/VSTR).  ad.vencoding() supplies the
// base register, U bit and word offset; B20 distinguishes load from store
// and B8 the double-precision forms.

// VLDR.F32 sd, ad.
void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


// VSTR.F32 sd, ad.  PC-based stores are disallowed.
void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


// VLDR.F64 dd, ad.
void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}


// VSTR.F64 dd, ad.  PC-based stores are disallowed.
void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
| 690 |
| 691 |
// Emits a three-operand single-precision VFP instruction.  'opcode'
// carries the instruction-specific bits; the S-register numbers are split
// into a 4-bit field and a low bit placed at the positions the encoding
// requires (multiplying by Bn places a value at bit n).
void Assembler::EmitVFPsss(Condition cond, int32_t opcode,
                           SRegister sd, SRegister sn, SRegister sm) {
  ASSERT(sd != kNoSRegister);
  ASSERT(sn != kNoSRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


// Emits a three-operand double-precision VFP instruction (B8 set); the
// D-register numbers are split into a 4-bit field and a high bit.
void Assembler::EmitVFPddd(Condition cond, int32_t opcode,
                           DRegister dd, DRegister dn, DRegister dm) {
  ASSERT(dd != kNoDRegister);
  ASSERT(dn != kNoDRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
| 726 |
| 727 |
// VMOV.F32 sd, sm: register-to-register single-precision move.
void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}


// VMOV.F64 dd, dm: register-to-register double-precision move.
void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}


// VMOV.F32 sd, #imm: immediate move, only possible when the float's bit
// pattern fits the VFP 8-bit immediate form (low 19 mantissa bits zero and
// the exponent field in one of the two representable shapes).  Returns
// false -- emitting nothing -- when the value is not representable.
bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
    // imm8 = sign : not(exp bit) : exp[0] : mantissa[22:19] packed per the
    // encoding; split into high and low nibbles below.
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) -1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}


// VMOV.F64 dd, #imm: double-precision analogue of the float form above
// (low 48 mantissa bits must be zero).  Returns false when not
// representable.
bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) -1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
| 766 |
| 767 |
// VFP arithmetic wrappers: each forwards its opcode bits to EmitVFPsss
// (single precision) or EmitVFPddd (double precision).  Two-operand
// instructions pass S0/D0 in the unused middle field.

void Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}


void Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}


void Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}


void Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}


void Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}


void Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}


// Multiply-accumulate: sd := sd + sn * sm.
void Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}


void Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}


// Multiply-subtract: sd := sd - sn * sm.
void Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}


void Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}


void Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}


void Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}


void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}


void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}


void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}


void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}


void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
| 867 |
| 868 |
// Emits a VFP instruction with a single-precision destination and
// double-precision source (used by the conversion instructions below).
void Assembler::EmitVFPsd(Condition cond, int32_t opcode,
                          SRegister sd, DRegister dm) {
  ASSERT(sd != kNoSRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


// Emits a VFP instruction with a double-precision destination and
// single-precision source.
void Assembler::EmitVFPds(Condition cond, int32_t opcode,
                          DRegister dd, SRegister sm) {
  ASSERT(dd != kNoDRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
| 897 |
| 898 |
| 899 void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) { |
| 900 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm); |
| 901 } |
| 902 |
| 903 |
| 904 void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) { |
| 905 EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm); |
| 906 } |
| 907 |
| 908 |
| 909 void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) { |
| 910 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm); |
| 911 } |
| 912 |
| 913 |
| 914 void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) { |
| 915 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm); |
| 916 } |
| 917 |
| 918 |
| 919 void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) { |
| 920 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm); |
| 921 } |
| 922 |
| 923 |
| 924 void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) { |
| 925 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm); |
| 926 } |
| 927 |
| 928 |
| 929 void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) { |
| 930 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm); |
| 931 } |
| 932 |
| 933 |
| 934 void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) { |
| 935 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm); |
| 936 } |
| 937 |
| 938 |
// vcvt.f32.u32: converts the unsigned 32-bit integer in sm to a float in sd.
void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}
| 942 |
| 943 |
// vcvt.f64.u32: converts the unsigned 32-bit integer in sm to a double in dd.
void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}
| 947 |
| 948 |
// vcmp.f32: compares sd with sm, setting the FPSCR condition flags
// (transfer them to the APSR with vmstat before branching on them).
void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}
| 952 |
| 953 |
// vcmp.f64: compares dd with dm, setting the FPSCR condition flags
// (transfer them to the APSR with vmstat before branching on them).
void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}
| 957 |
| 958 |
// vcmp.f32 sd, #0.0: compares sd against zero, setting the FPSCR flags.
// The sm operand field is unused by this encoding (hence S0 below).
void Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}
| 962 |
| 963 |
// vcmp.f64 dd, #0.0: compares dd against zero, setting the FPSCR flags.
// The dm operand field is unused by this encoding (hence D0 below).
void Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
| 967 |
| 968 |
// Copies the FPSCR condition flags into the APSR NZCV bits, making the
// result of a preceding vcmp usable by conditional instructions.
// Encoding with Rt = PC (0b1111) selects the APSR_nzcv form of vmrs.
void Assembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(PC)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
| 977 |
| 978 |
// Emits an svc (supervisor call) instruction carrying the given 24-bit
// immediate. Always emitted with the AL condition.
void Assembler::svc(uint32_t imm24) {
  ASSERT(imm24 < (1 << 24));  // The immediate must fit in 24 bits.
  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
  Emit(encoding);
}
| 984 |
| 985 |
// Emits a bkpt (breakpoint) instruction. The 16-bit immediate is split
// by the encoding: bits 15:4 occupy instruction bits 19:8 and bits 3:0
// occupy the low nibble, separated by the fixed 0111 pattern (B6|B5|B4).
void Assembler::bkpt(uint16_t imm16) {
  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  Emit(encoding);
}
| 991 |
| 992 |
| 993 void Assembler::b(Label* label, Condition cond) { |
| 994 EmitBranch(cond, label, false); |
| 995 } |
| 996 |
| 997 |
| 998 void Assembler::bl(Label* label, Condition cond) { |
| 999 EmitBranch(cond, label, true); |
| 1000 } |
| 1001 |
| 1002 |
// Emits blx rm: branches to the address in rm with link (writes LR).
// Encoding: cond 0001 0010 1111 1111 1111 0011 rm.
void Assembler::blx(Register rm, Condition cond) {
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
| 1011 |
| 1012 |
// Marks an exception handler in the instruction stream: emits a
// recognizable 'tst pc, #0' marker followed by a branch encoding the
// handler target. The branch is never executed at runtime because the
// preceding b(&l) jumps over it.
void Assembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));  // tst pc, #0 marker.
  Label l;
  b(&l);  // Skip over the embedded handler branch below.
  EmitBranch(AL, label, false);  // Encodes the handler position; not executed.
  Bind(&l);
}
| 1020 |
| 1021 |
// Loads the VM |object| into |rd|. Not yet implemented on ARM.
void Assembler::LoadObject(Register rd, const Object& object) {
  UNIMPLEMENTED();
}
| 1025 |
| 1026 |
// Binds |label| to the current buffer position and back-patches every
// branch that was linked to it while the target was still unknown.
// Unresolved branches form a chain: each linked branch instruction's
// offset field holds the (encoded) position of the previously linked one.
void Assembler::Bind(Label* label) {
  ASSERT(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    // 'next' is the raw branch instruction; its offset links the chain.
    int32_t next = buffer_.Load<int32_t>(position);
    // Patch this branch to target bound_pc, then follow the chain.
    int32_t encoded = Assembler::EncodeBranchOffset(bound_pc - position, next);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = Assembler::DecodeBranchOffset(next);
  }
  label->BindTo(bound_pc);
}
| 1039 |
| 1040 |
| 1041 bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) { |
| 1042 switch (type) { |
| 1043 case kLoadSignedByte: |
| 1044 case kLoadSignedHalfword: |
| 1045 case kLoadUnsignedHalfword: |
| 1046 case kLoadWordPair: |
| 1047 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
| 1048 case kLoadUnsignedByte: |
| 1049 case kLoadWord: |
| 1050 return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2. |
| 1051 case kLoadSWord: |
| 1052 case kLoadDWord: |
| 1053 return Utils::IsAbsoluteUint(10, offset); // VFP addressing mode. |
| 1054 default: |
| 1055 UNREACHABLE(); |
| 1056 return false; |
| 1057 } |
| 1058 } |
| 1059 |
| 1060 |
| 1061 bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) { |
| 1062 switch (type) { |
| 1063 case kStoreHalfword: |
| 1064 case kStoreWordPair: |
| 1065 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
| 1066 case kStoreByte: |
| 1067 case kStoreWord: |
| 1068 return Utils::IsAbsoluteUint(12, offset); // Addressing mode 2. |
| 1069 case kStoreSWord: |
| 1070 case kStoreDWord: |
| 1071 return Utils::IsAbsoluteUint(10, offset); // VFP addressing mode. |
| 1072 default: |
| 1073 UNREACHABLE(); |
| 1074 return false; |
| 1075 } |
| 1076 } |
| 1077 |
| 1078 |
| 1079 void Assembler::Push(Register rd, Condition cond) { |
| 1080 str(rd, Address(SP, -kWordSize, Address::PreIndex), cond); |
| 1081 } |
| 1082 |
| 1083 |
| 1084 void Assembler::Pop(Register rd, Condition cond) { |
| 1085 ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond); |
| 1086 } |
| 1087 |
| 1088 |
// Pushes the register set |regs| with one stm (store multiple,
// decrement-before with base writeback) instruction.
void Assembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}
| 1092 |
| 1093 |
// Pops the register set |regs| with one ldm (load multiple,
// increment-after with base writeback) instruction.
void Assembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}
| 1097 |
| 1098 |
| 1099 void Assembler::Mov(Register rd, Register rm, Condition cond) { |
| 1100 if (rd != rm) { |
| 1101 mov(rd, ShifterOperand(rm), cond); |
| 1102 } |
| 1103 } |
| 1104 |
| 1105 |
| 1106 void Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm, |
| 1107 Condition cond) { |
| 1108 ASSERT(shift_imm != 0); // Do not use Lsl if no shift is wanted. |
| 1109 mov(rd, ShifterOperand(rm, LSL, shift_imm), cond); |
| 1110 } |
| 1111 |
| 1112 |
| 1113 void Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm, |
| 1114 Condition cond) { |
| 1115 ASSERT(shift_imm != 0); // Do not use Lsr if no shift is wanted. |
| 1116 if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax. |
| 1117 mov(rd, ShifterOperand(rm, LSR, shift_imm), cond); |
| 1118 } |
| 1119 |
| 1120 |
| 1121 void Assembler::Asr(Register rd, Register rm, uint32_t shift_imm, |
| 1122 Condition cond) { |
| 1123 ASSERT(shift_imm != 0); // Do not use Asr if no shift is wanted. |
| 1124 if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax. |
| 1125 mov(rd, ShifterOperand(rm, ASR, shift_imm), cond); |
| 1126 } |
| 1127 |
| 1128 |
| 1129 void Assembler::Ror(Register rd, Register rm, uint32_t shift_imm, |
| 1130 Condition cond) { |
| 1131 ASSERT(shift_imm != 0); // Use Rrx instruction. |
| 1132 mov(rd, ShifterOperand(rm, ROR, shift_imm), cond); |
| 1133 } |
| 1134 |
| 1135 |
| 1136 void Assembler::Rrx(Register rd, Register rm, Condition cond) { |
| 1137 mov(rd, ShifterOperand(rm, ROR, 0), cond); |
| 1138 } |
| 1139 |
| 1140 |
// Jumps to the fixed external |label| by materializing its address in IP
// and moving it into PC.
void Assembler::Branch(const ExternalLabel* label) {
  // TODO(regis): Revisit this code sequence.
  LoadImmediate(IP, label->address());  // Target address is never patched.
  mov(PC, ShifterOperand(IP));
}
| 1146 |
| 1147 |
// Calls the external |label| through a patchable sequence: the target
// address is embedded as a data word in the instruction stream and read
// with a pc-relative load, so CodePatcher can rewrite it in place.
void Assembler::BranchLink(const ExternalLabel* label) {
  // TODO(regis): Revisit this code sequence.
  // Make sure that CodePatcher is able to patch this code sequence.
  // For added code robustness, use 'blx lr' in a patchable sequence and
  // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
  ldr(LR, Address(PC));  // Reads the address word below (PC reads ahead by 8).
  Label skip;
  b(&skip);  // Jump over the embedded data word.
  Emit(label->address());  // May get patched.
  Bind(&skip);
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}
| 1160 |
| 1161 |
// Stores the current PC to |ad| and then calls the external |label|
// (loaded into IP; the target address is not patchable).
void Assembler::BranchLinkStore(const ExternalLabel* label, Address ad) {
  // TODO(regis): Revisit this code sequence.
  LoadImmediate(IP, label->address());  // Target address is never patched.
  str(PC, ad);
  blx(IP);  // Use blx instruction so that the return branch prediction works.
}
| 1168 |
| 1169 |
// Loads a code address from [base, #offset] into IP and calls it.
void Assembler::BranchLinkOffset(Register base, int offset) {
  ASSERT(base != PC);
  ASSERT(base != IP);  // IP is clobbered below.
  if (Address::CanHoldLoadOffset(kLoadWord, offset)) {
    ldr(IP, Address(base, offset));
  } else {
    // The offset does not fit in a mode 2 immediate: fold the high bits
    // into the base first, then load using the low 12 bits.
    int offset_hi = offset & ~kOffset12Mask;
    int offset_lo = offset & kOffset12Mask;
    ShifterOperand offset_hi_op;
    if (ShifterOperand::CanHold(offset_hi, &offset_hi_op)) {
      add(IP, base, offset_hi_op);
      ldr(IP, Address(IP, offset_lo));
    } else {
      // High bits not encodable either: materialize them with a full load.
      LoadImmediate(IP, offset_hi);
      add(IP, IP, ShifterOperand(base));
      ldr(IP, Address(IP, offset_lo));
    }
  }
  blx(IP);  // Use blx instruction so that the return branch prediction works.
}
| 1190 |
| 1191 |
| 1192 void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { |
| 1193 ShifterOperand shifter_op; |
| 1194 if (ShifterOperand::CanHold(value, &shifter_op)) { |
| 1195 mov(rd, shifter_op, cond); |
| 1196 } else if (ShifterOperand::CanHold(~value, &shifter_op)) { |
| 1197 mvn(rd, shifter_op, cond); |
| 1198 } else { |
| 1199 movw(rd, Utils::Low16Bits(value), cond); |
| 1200 uint16_t value_high = Utils::High16Bits(value); |
| 1201 if (value_high != 0) { |
| 1202 movt(rd, value_high, cond); |
| 1203 } |
| 1204 } |
| 1205 } |
| 1206 |
| 1207 |
| 1208 void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) { |
| 1209 if (!vmovs(sd, value, cond)) { |
| 1210 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond); |
| 1211 vmovsr(sd, IP, cond); |
| 1212 } |
| 1213 } |
| 1214 |
| 1215 |
// Loads the double |value| into |dd|, preferring a single vmovd when the
// constant fits the VFP immediate format; otherwise builds the 64-bit
// bit pattern in IP/|scratch| and transfers both halves with vmovdrr.
void Assembler::LoadDImmediate(DRegister dd,
                               double value,
                               Register scratch,
                               Condition cond) {
  // TODO(regis): Revisit this code sequence.
  ASSERT(scratch != PC);
  ASSERT(scratch != IP);  // IP already holds the low word below.
  if (!vmovd(dd, value, cond)) {
    // A scratch register and IP are needed to load an arbitrary double.
    ASSERT(scratch != kNoRegister);
    int64_t imm64 = bit_cast<int64_t, double>(value);
    LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
    LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
    vmovdrr(dd, IP, scratch, cond);
  }
}
| 1232 |
| 1233 |
// Loads a value of the given |type| from [base, #offset] into |reg|,
// rewriting the address through IP when the offset is not directly
// encodable for that addressing mode.
void Assembler::LoadFromOffset(LoadOperandType type,
                               Register reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  if (!Address::CanHoldLoadOffset(type, offset)) {
    ASSERT(base != IP);  // IP is used as scratch for the rewritten address.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  ASSERT(Address::CanHoldLoadOffset(type, offset));
  // Dispatch to the load instruction matching the operand type.
  switch (type) {
    case kLoadSignedByte:
      ldrsb(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedByte:
      ldrb(reg, Address(base, offset), cond);
      break;
    case kLoadSignedHalfword:
      ldrsh(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedHalfword:
      ldrh(reg, Address(base, offset), cond);
      break;
    case kLoadWord:
      ldr(reg, Address(base, offset), cond);
      break;
    case kLoadWordPair:
      ldrd(reg, Address(base, offset), cond);
      break;
    default:
      UNREACHABLE();
  }
}
| 1270 |
| 1271 |
// Stores |reg| to [base, #offset] with the given |type|, rewriting the
// address through IP when the offset is not directly encodable.
void Assembler::StoreToOffset(StoreOperandType type,
                              Register reg,
                              Register base,
                              int32_t offset,
                              Condition cond) {
  if (!Address::CanHoldStoreOffset(type, offset)) {
    ASSERT(reg != IP);  // IP is clobbered as scratch below.
    ASSERT(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  ASSERT(Address::CanHoldStoreOffset(type, offset));
  // Dispatch to the store instruction matching the operand type.
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      UNREACHABLE();
  }
}
| 1303 |
| 1304 |
| 1305 void Assembler::LoadSFromOffset(SRegister reg, |
| 1306 Register base, |
| 1307 int32_t offset, |
| 1308 Condition cond) { |
| 1309 if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) { |
| 1310 ASSERT(base != IP); |
| 1311 LoadImmediate(IP, offset, cond); |
| 1312 add(IP, IP, ShifterOperand(base), cond); |
| 1313 base = IP; |
| 1314 offset = 0; |
| 1315 } |
| 1316 ASSERT(Address::CanHoldLoadOffset(kLoadSWord, offset)); |
| 1317 vldrs(reg, Address(base, offset), cond); |
| 1318 } |
| 1319 |
| 1320 |
| 1321 void Assembler::StoreSToOffset(SRegister reg, |
| 1322 Register base, |
| 1323 int32_t offset, |
| 1324 Condition cond) { |
| 1325 if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) { |
| 1326 ASSERT(base != IP); |
| 1327 LoadImmediate(IP, offset, cond); |
| 1328 add(IP, IP, ShifterOperand(base), cond); |
| 1329 base = IP; |
| 1330 offset = 0; |
| 1331 } |
| 1332 ASSERT(Address::CanHoldStoreOffset(kStoreSWord, offset)); |
| 1333 vstrs(reg, Address(base, offset), cond); |
| 1334 } |
| 1335 |
| 1336 |
| 1337 void Assembler::LoadDFromOffset(DRegister reg, |
| 1338 Register base, |
| 1339 int32_t offset, |
| 1340 Condition cond) { |
| 1341 if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) { |
| 1342 ASSERT(base != IP); |
| 1343 LoadImmediate(IP, offset, cond); |
| 1344 add(IP, IP, ShifterOperand(base), cond); |
| 1345 base = IP; |
| 1346 offset = 0; |
| 1347 } |
| 1348 ASSERT(Address::CanHoldLoadOffset(kLoadDWord, offset)); |
| 1349 vldrd(reg, Address(base, offset), cond); |
| 1350 } |
| 1351 |
| 1352 |
| 1353 void Assembler::StoreDToOffset(DRegister reg, |
| 1354 Register base, |
| 1355 int32_t offset, |
| 1356 Condition cond) { |
| 1357 if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) { |
| 1358 ASSERT(base != IP); |
| 1359 LoadImmediate(IP, offset, cond); |
| 1360 add(IP, IP, ShifterOperand(base), cond); |
| 1361 base = IP; |
| 1362 offset = 0; |
| 1363 } |
| 1364 ASSERT(Address::CanHoldStoreOffset(kStoreDWord, offset)); |
| 1365 vstrd(reg, Address(base, offset), cond); |
| 1366 } |
| 1367 |
| 1368 |
// In-place variant: rd += value, delegating to the three-register form
// with rd as both source and destination.
void Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}
| 1372 |
| 1373 |
// Computes rd = rn + value without touching the condition flags,
// choosing the shortest encodable instruction sequence (add, sub,
// mvn+add/sub, or movw/movt+add).
void Assembler::AddConstant(Register rd, Register rn, int32_t value,
                            Condition cond) {
  if (value == 0) {
    // Adding zero degenerates to a move (elided when rd == rn).
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than selecting add for
  // positive values and sub for negatives ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    ASSERT(rn != IP);  // IP is used as scratch below.
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      // Materialize value via its bitwise complement, then add.
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      // Materialize -value via its bitwise complement, then subtract.
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Last resort: build the constant with movw/movt (movt elided
      // when the high half is zero), then add.
      movw(IP, Utils::Low16Bits(value), cond);
      uint16_t value_high = Utils::High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
| 1408 |
| 1409 |
// Computes rd = rn + value and sets the condition flags (adds/subs),
// using the same shortest-sequence selection as AddConstant.
void Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                    Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adds(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    subs(rd, rn, shifter_op, cond);
  } else {
    ASSERT(rn != IP);  // IP is used as scratch below.
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      // Materialize value via its bitwise complement, then adds.
      mvn(IP, shifter_op, cond);
      adds(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      // Materialize -value via its bitwise complement, then subs.
      mvn(IP, shifter_op, cond);
      subs(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Last resort: build the constant with movw/movt, then adds.
      movw(IP, Utils::Low16Bits(value), cond);
      uint16_t value_high = Utils::High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adds(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
| 1435 |
| 1436 |
// Computes rd = rn + value + carry. The sbc fallback relies on the
// identity rn - operand - 1 + C == rn + value + C when operand == -value - 1.
void Assembler::AddConstantWithCarry(Register rd, Register rn, int32_t value,
                                     Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adc(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value - 1, &shifter_op)) {
    sbc(rd, rn, shifter_op, cond);
  } else {
    ASSERT(rn != IP);  // IP is used as scratch below.
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      // Materialize value via its bitwise complement, then adc.
      mvn(IP, shifter_op, cond);
      adc(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value - 1), &shifter_op)) {
      // Materialize -value - 1 via its bitwise complement, then sbc.
      mvn(IP, shifter_op, cond);
      sbc(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Last resort: build the constant with movw/movt, then adc.
      movw(IP, Utils::Low16Bits(value), cond);
      uint16_t value_high = Utils::High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adc(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
| 1462 |
| 1463 |
// Emits a stop/trap sequence: embeds the address of |message| as a data
// word ahead of an svc so the simulator or a debugger can retrieve it.
void Assembler::Stop(const char* message) {
  if (FLAG_print_stop_message) {
    UNIMPLEMENTED();  // Emit call to StubCode::PrintStopMessage().
  }
  // Emit the message address before the svc instruction, so that we can
  // 'unstop' and continue execution in the simulator or jump to the next
  // instruction in gdb.
  Label stop;
  b(&stop);  // Jump over the embedded message-address word.
  Emit(reinterpret_cast<int32_t>(message));
  Bind(&stop);
  svc(kStopMessageSvcCode);
}
| 1477 |
| 1478 |
// Writes the byte |offset| into the offset field of branch instruction
// |inst|, converting it to the word-scaled, PC-bias-adjusted form the
// hardware expects. Inverse of DecodeBranchOffset (modulo the masking).
int32_t Assembler::EncodeBranchOffset(int offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= 8;
  ASSERT(Utils::IsAligned(offset, 4));  // Branch targets are word-aligned.
  ASSERT(Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset));

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}
| 1490 |
| 1491 |
// Extracts the byte offset from branch instruction |inst|: the
// << 8 then arithmetic >> 6 pair sign-extends the masked immediate
// while applying a net left shift of 2 (word to byte scaling), and the
// +8 restores the PC-read bias removed in EncodeBranchOffset.
int Assembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
}
15 | 1496 |
16 } // namespace dart | 1497 } // namespace dart |
17 | 1498 |
18 #endif // defined TARGET_ARCH_ARM | 1499 #endif // defined TARGET_ARCH_ARM |
19 | 1500 |
OLD | NEW |