Chromium Code Reviews

Side by Side Diff: src/arm/assembler-thumb2.cc

Issue 651029: Forking disassembler and simulator for Thumb2 support; (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 9 months ago
OLD | NEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions 5 // modification, are permitted provided that the following conditions
6 // are met: 6 // are met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 164 matching lines...)
175 DwVfpRegister d15 = { 15 }; 175 DwVfpRegister d15 = { 15 };
176 176
177 // ----------------------------------------------------------------------------- 177 // -----------------------------------------------------------------------------
178 // Implementation of RelocInfo 178 // Implementation of RelocInfo
179 179
180 const int RelocInfo::kApplyMask = 0; 180 const int RelocInfo::kApplyMask = 0;
181 181
182 182
183 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { 183 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
184 // Patch the code at the current address with the supplied instructions. 184 // Patch the code at the current address with the supplied instructions.
185 Instr* pc = reinterpret_cast<Instr*>(pc_); 185 InstrArm* pc = reinterpret_cast<InstrArm*>(pc_);
186 Instr* instr = reinterpret_cast<Instr*>(instructions); 186 InstrArm* instr = reinterpret_cast<InstrArm*>(instructions);
187 for (int i = 0; i < instruction_count; i++) { 187 for (int i = 0; i < instruction_count; i++) {
188 *(pc + i) = *(instr + i); 188 *(pc + i) = *(instr + i);
189 } 189 }
190 190
191 // Indicate that code has changed. 191 // Indicate that code has changed.
192 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); 192 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrArmSize);
193 } 193 }
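Note: this patch renames the 32-bit ARM instruction type from Instr to InstrArm (and kInstrSize to kInstrArmSize) while Thumb-2 code is emitted in 16-bit units via emit_thumb(). A minimal sketch of the assumed type split, for orientation only; the real typedefs live in assembler-thumb2.h and are not part of this diff, and the InstrThumb16 name below is invented for illustration:

  typedef int32_t InstrArm;                           // one 32-bit ARM instruction word
  typedef uint16_t InstrThumb16;                      // one 16-bit Thumb halfword (hypothetical name)
  static const int kInstrArmSize = sizeof(InstrArm);  // 4 bytes, used by FlushICache above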
194 194
195 195
196 // Patch the code at the current PC with a call to the target address. 196 // Patch the code at the current PC with a call to the target address.
197 // Additional guard instructions can be added if required. 197 // Additional guard instructions can be added if required.
198 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { 198 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
199 // Patch the code at the current address with a call to the target. 199 // Patch the code at the current address with a call to the target.
200 UNIMPLEMENTED(); 200 UNIMPLEMENTED();
201 } 201 }
202 202
(...skipping 80 matching lines...)
283 L = 1 << 20, // load (or store) 283 L = 1 << 20, // load (or store)
284 S = 1 << 20, // set condition code (or leave unchanged) 284 S = 1 << 20, // set condition code (or leave unchanged)
285 W = 1 << 21, // writeback base register (or leave unchanged) 285 W = 1 << 21, // writeback base register (or leave unchanged)
286 A = 1 << 21, // accumulate in multiply instruction (or not) 286 A = 1 << 21, // accumulate in multiply instruction (or not)
287 B = 1 << 22, // unsigned byte (or word) 287 B = 1 << 22, // unsigned byte (or word)
288 N = 1 << 22, // long (or short) 288 N = 1 << 22, // long (or short)
289 U = 1 << 23, // positive (or negative) offset/index 289 U = 1 << 23, // positive (or negative) offset/index
290 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing) 290 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
291 I = 1 << 25, // immediate shifter operand (or not) 291 I = 1 << 25, // immediate shifter operand (or not)
292 292
293 B4 = 1 << 4,
294 B5 = 1 << 5,
295 B6 = 1 << 6,
296 B7 = 1 << 7,
297 B8 = 1 << 8,
298 B9 = 1 << 9,
299 B12 = 1 << 12,
300 B16 = 1 << 16,
301 B18 = 1 << 18,
302 B19 = 1 << 19,
303 B20 = 1 << 20,
304 B21 = 1 << 21,
305 B22 = 1 << 22,
306 B23 = 1 << 23,
307 B24 = 1 << 24,
308 B25 = 1 << 25,
309 B26 = 1 << 26,
310 B27 = 1 << 27,
311
312 // Instruction bit masks. 293 // Instruction bit masks.
313 RdMask = 15 << 12, // in str instruction 294 RdMask = 15 << 12, // in str instruction
314 CondMask = 15 << 28, 295 CondMask = 15 << 28,
315 CoprocessorMask = 15 << 8, 296 CoprocessorMask = 15 << 8,
316 OpCodeMask = 15 << 21, // in data-processing instructions 297 OpCodeMask = 15 << 21, // in data-processing instructions
317 Imm24Mask = (1 << 24) - 1, 298 Imm24Mask = (1 << 24) - 1,
318 Off12Mask = (1 << 12) - 1, 299 Off12Mask = (1 << 12) - 1,
319 // Reserved condition. 300 // Reserved condition.
320 nv = 15 << 28 301 nv = 15 << 28
321 }; 302 };
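For orientation, a short sketch (not part of the patch) of how the bit masks above pull fields back out of an encoded ARM word; only masks defined in this enum are used:

  Condition cond = static_cast<Condition>(instr & CondMask);  // bits 31-28
  int opcode = (instr & OpCodeMask) >> 21;                     // bits 24-21, data-processing opcode
  int rd = (instr & RdMask) >> 12;                             // bits 15-12, Rd field of str
  int imm24 = instr & Imm24Mask;                               // low 24 bits of b/bl/blx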
322 303
323 304
324 // add(sp, sp, 4) instruction (aka Pop()) 305 // add(sp, sp, 4) instruction (aka Pop())
325 static const Instr kPopInstruction = 306 static const InstrArm kPopInstruction =
326 al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12; 307 al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
327 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) 308 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
328 // register r is not encoded. 309 // register r is not encoded.
329 static const Instr kPushRegPattern = 310 static const InstrArm kPushRegPattern =
330 al | B26 | 4 | NegPreIndex | sp.code() * B16; 311 al | B26 | 4 | NegPreIndex | sp.code() * B16;
331 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) 312 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
332 // register r is not encoded. 313 // register r is not encoded.
333 static const Instr kPopRegPattern = 314 static const InstrArm kPopRegPattern =
334 al | B26 | L | 4 | PostIndex | sp.code() * B16; 315 al | B26 | L | 4 | PostIndex | sp.code() * B16;
335 // mov lr, pc 316 // mov lr, pc
336 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; 317 const InstrArm kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
337 // ldr pc, [pc, #XXX] 318 // ldr pc, [pc, #XXX]
338 const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16; 319 const InstrArm kLdrPCPattern = al | B26 | L | pc.code() * B16;
339 320
340 // Spare buffer. 321 // Spare buffer.
341 static const int kMinimalBufferSize = 4*KB; 322 static const int kMinimalBufferSize = 4*KB;
342 static byte* spare_buffer_ = NULL; 323 static byte* spare_buffer_ = NULL;
343 324
344 Assembler::Assembler(void* buffer, int buffer_size) { 325 Assembler::Assembler(void* buffer, int buffer_size) {
345 if (buffer == NULL) { 326 if (buffer == NULL) {
346 // Do our own buffer management. 327 // Do our own buffer management.
347 if (buffer_size <= kMinimalBufferSize) { 328 if (buffer_size <= kMinimalBufferSize) {
348 buffer_size = kMinimalBufferSize; 329 buffer_size = kMinimalBufferSize;
(...skipping 25 matching lines...)
374 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); 355 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
375 num_prinfo_ = 0; 356 num_prinfo_ = 0;
376 next_buffer_check_ = 0; 357 next_buffer_check_ = 0;
377 no_const_pool_before_ = 0; 358 no_const_pool_before_ = 0;
378 last_const_pool_end_ = 0; 359 last_const_pool_end_ = 0;
379 last_bound_pos_ = 0; 360 last_bound_pos_ = 0;
380 current_statement_position_ = RelocInfo::kNoPosition; 361 current_statement_position_ = RelocInfo::kNoPosition;
381 current_position_ = RelocInfo::kNoPosition; 362 current_position_ = RelocInfo::kNoPosition;
382 written_statement_position_ = current_statement_position_; 363 written_statement_position_ = current_statement_position_;
383 written_position_ = current_position_; 364 written_position_ = current_position_;
365 thumb_mode_ = false;
384 } 366 }
385 367
386 368
387 Assembler::~Assembler() { 369 Assembler::~Assembler() {
388 if (own_buffer_) { 370 if (own_buffer_) {
389 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { 371 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
390 spare_buffer_ = buffer_; 372 spare_buffer_ = buffer_;
391 } else { 373 } else {
392 DeleteArray(buffer_); 374 DeleteArray(buffer_);
393 } 375 }
(...skipping 31 matching lines...)
425 // Linked labels refer to unknown positions in the code 407 // Linked labels refer to unknown positions in the code
426 // to be generated; pos() is the position of the last 408 // to be generated; pos() is the position of the last
427 // instruction using the label. 409 // instruction using the label.
428 410
429 411
430 // The link chain is terminated by a negative code position (must be aligned) 412 // The link chain is terminated by a negative code position (must be aligned)
431 const int kEndOfChain = -4; 413 const int kEndOfChain = -4;
432 414
433 415
434 int Assembler::target_at(int pos) { 416 int Assembler::target_at(int pos) {
435 Instr instr = instr_at(pos); 417 InstrArm instr = instr_arm_at(pos);
436 if ((instr & ~Imm24Mask) == 0) { 418 if ((instr & ~Imm24Mask) == 0) {
437 // Emitted label constant, not part of a branch. 419 // Emitted label constant, not part of a branch.
438 return instr - (Code::kHeaderSize - kHeapObjectTag); 420 return instr - (Code::kHeaderSize - kHeapObjectTag);
439 } 421 }
440 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 422 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
441 int imm26 = ((instr & Imm24Mask) << 8) >> 6; 423 int imm26 = ((instr & Imm24Mask) << 8) >> 6;
442 if ((instr & CondMask) == nv && (instr & B24) != 0) 424 if ((instr & CondMask) == nv && (instr & B24) != 0)
443 // blx uses bit 24 to encode bit 2 of imm26 425 // blx uses bit 24 to encode bit 2 of imm26
444 imm26 += 2; 426 imm26 += 2;
445 427
446 return pos + kPcLoadDelta + imm26; 428 return pos + kPcLoadDelta + imm26;
447 } 429 }
448 430
449 431
450 void Assembler::target_at_put(int pos, int target_pos) { 432 void Assembler::target_at_put(int pos, int target_pos) {
451 Instr instr = instr_at(pos); 433 InstrArm instr = instr_arm_at(pos);
452 if ((instr & ~Imm24Mask) == 0) { 434 if ((instr & ~Imm24Mask) == 0) {
453 ASSERT(target_pos == kEndOfChain || target_pos >= 0); 435 ASSERT(target_pos == kEndOfChain || target_pos >= 0);
454 // Emitted label constant, not part of a branch. 436 // Emitted label constant, not part of a branch.
455 // Make label relative to Code* of generated Code object. 437 // Make label relative to Code* of generated Code object.
456 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 438 instr_arm_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
457 return; 439 return;
458 } 440 }
459 int imm26 = target_pos - (pos + kPcLoadDelta); 441 int imm26 = target_pos - (pos + kPcLoadDelta);
460 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 442 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
461 if ((instr & CondMask) == nv) { 443 if ((instr & CondMask) == nv) {
462 // blx uses bit 24 to encode bit 2 of imm26 444 // blx uses bit 24 to encode bit 2 of imm26
463 ASSERT((imm26 & 1) == 0); 445 ASSERT((imm26 & 1) == 0);
464 instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24; 446 instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
465 } else { 447 } else {
466 ASSERT((imm26 & 3) == 0); 448 ASSERT((imm26 & 3) == 0);
467 instr &= ~Imm24Mask; 449 instr &= ~Imm24Mask;
468 } 450 }
469 int imm24 = imm26 >> 2; 451 int imm24 = imm26 >> 2;
470 ASSERT(is_int24(imm24)); 452 ASSERT(is_int24(imm24));
471 instr_at_put(pos, instr | (imm24 & Imm24Mask)); 453 instr_arm_at_put(pos, instr | (imm24 & Imm24Mask));
472 } 454 }
473 455
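A worked example of the branch fix-up above (illustrative values; kPcLoadDelta is the ARM pipeline bias of 8 bytes):

  // b at position pos, label bound 64 bytes further on:
  int imm26 = target_pos - (pos + kPcLoadDelta);   // (pos + 64) - (pos + 8) = 56
  int imm24 = imm26 >> 2;                          // 14, stored in the low 24 bits
  // target_at() reverses this: ((14 << 8) >> 6) == 56, and pos + 8 + 56 == pos + 64.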
474 456
475 void Assembler::print(Label* L) { 457 void Assembler::print(Label* L) {
476 if (L->is_unused()) { 458 if (L->is_unused()) {
477 PrintF("unused label\n"); 459 PrintF("unused label\n");
478 } else if (L->is_bound()) { 460 } else if (L->is_bound()) {
479 PrintF("bound label to %d\n", L->pos()); 461 PrintF("bound label to %d\n", L->pos());
480 } else if (L->is_linked()) { 462 } else if (L->is_linked()) {
481 Label l = *L; 463 Label l = *L;
482 PrintF("unbound label"); 464 PrintF("unbound label");
483 while (l.is_linked()) { 465 while (l.is_linked()) {
484 PrintF("@ %d ", l.pos()); 466 PrintF("@ %d ", l.pos());
485 Instr instr = instr_at(l.pos()); 467 InstrArm instr = instr_arm_at(l.pos());
486 if ((instr & ~Imm24Mask) == 0) { 468 if ((instr & ~Imm24Mask) == 0) {
487 PrintF("value\n"); 469 PrintF("value\n");
488 } else { 470 } else {
489 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx 471 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
490 int cond = instr & CondMask; 472 int cond = instr & CondMask;
491 const char* b; 473 const char* b;
492 const char* c; 474 const char* c;
493 if (cond == nv) { 475 if (cond == nv) {
494 b = "blx"; 476 b = "blx";
495 c = ""; 477 c = "";
(...skipping 86 matching lines...)
582 ASSERT(link == kEndOfChain); 564 ASSERT(link == kEndOfChain);
583 L->Unuse(); 565 L->Unuse();
584 } 566 }
585 } 567 }
586 568
587 569
588 // Low-level code emission routines depending on the addressing mode. 570 // Low-level code emission routines depending on the addressing mode.
589 static bool fits_shifter(uint32_t imm32, 571 static bool fits_shifter(uint32_t imm32,
590 uint32_t* rotate_imm, 572 uint32_t* rotate_imm,
591 uint32_t* immed_8, 573 uint32_t* immed_8,
592 Instr* instr) { 574 InstrArm* instr) {
593 // imm32 must be unsigned. 575 // imm32 must be unsigned.
594 for (int rot = 0; rot < 16; rot++) { 576 for (int rot = 0; rot < 16; rot++) {
595 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); 577 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
596 if ((imm8 <= 0xff)) { 578 if ((imm8 <= 0xff)) {
597 *rotate_imm = rot; 579 *rotate_imm = rot;
598 *immed_8 = imm8; 580 *immed_8 = imm8;
599 return true; 581 return true;
600 } 582 }
601 } 583 }
602 // If the opcode is mov or mvn and if ~imm32 fits, change the opcode. 584 // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
(...skipping 19 matching lines...)
622 } 604 }
623 #endif 605 #endif
624 return Serializer::enabled(); 606 return Serializer::enabled();
625 } else if (rmode == RelocInfo::NONE) { 607 } else if (rmode == RelocInfo::NONE) {
626 return false; 608 return false;
627 } 609 }
628 return true; 610 return true;
629 } 611 }
630 612
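A worked example for fits_shifter() above (an illustration, not code from the patch): an ARM operand2 immediate must be an 8-bit value rotated right by an even amount, so 0x3FC00 is encodable while an arbitrary constant such as 0x12345678 is not.

  uint32_t rotate_imm, immed_8;
  // 0x3FC00 == 0xFF rotated right by 22 bits: rotate_imm = 11, immed_8 = 0xFF.
  bool ok = fits_shifter(0x3FC00, &rotate_imm, &immed_8, NULL);      // true
  bool bad = fits_shifter(0x12345678, &rotate_imm, &immed_8, NULL);  // false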
631 613
632 void Assembler::addrmod1(Instr instr, 614 void Assembler::DataProcessing(Condition cond, Opcode op, SBit s,
615 Register rn,
616 Register rd,
617 const Operand& x) {
618 if (cond != al) {
619 addrmod1(cond | op * B21 | s, rn, rd, x);
620 } else if (!x.rm_.is_valid()) { // immediate data
621 addrmod1(cond | op * B21 | s, rn, rd, x);
622 } else if (!x.rs_.is_valid()) { // immediate shift
623 DataProcessingReg(op, s, rn, rd, x.rm_, x.shift_op_, x.shift_imm_);
624 // Go back immediately to avoid issues with bind() for now when
625 // the label points to an ARM instruction.
626 EnsureArmMode();
627 } else { // Register shift.
628 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
629 addrmod1(cond | op * B21 | s, rn, rd, x);
630 }
631 if (rn.is(pc) || x.rm_.is(pc)) {
632 // Block constant pool emission for one instruction after reading pc.
633 BlockConstPoolBefore(pc_offset() + kInstrArmSize);
634 }
635 }
636
637 void Assembler::DataProcessingImm(Opcode op, SBit s, Register rn, Register rd,
638 int imm) {
639 InstrArm i0 = 0xf000; // 1111 0iio ooon nnnn
640 switch (op) {
641 case ADD:
642 i0 |= 0x8 * B5; // 1000
643 break;
644 case AND:
645 i0 |= 0x0 * B5; // 0000
646 break;
647 case SUB:
648 i0 |= 0xd * B5; // 1101;
649 break;
650 default:
651 UNIMPLEMENTED();
652 }
653 if (s) {
654 i0 |= B4;
655 }
656 i0 |= (imm >> 11) * B10 | rn.code();
657 emit_thumb(i0);
658 emit_thumb(((imm >> 8) & 7) * B12 | rd.code() * B8 | (imm & 0xff));
659 }
660
661 void Assembler::DataProcessingReg(Opcode op, SBit s, Register rn, Register rd,
662 Register rm, ShiftOp shiftOp, int shiftBy) {
663 InstrArm i0 = B15 | B14 | B13 | B11 | B9; // 1110 101o ooon nnnn
664 switch (op) {
665 case ADD:
666 i0 |= 0x8 * B5; // 1000
667 break;
668 case AND:
669 i0 |= 0x0 * B5; // 0000
670 break;
671 default:
672 UNIMPLEMENTED();
673 }
674
675 if (s) {
676 i0 |= B4;
677 }
678 i0 |= rn.code();
679 emit_thumb(i0);
680 InstrArm i1 = ((shiftBy >> 2) & 7) * B12 | rd.code() * B8 |
681 (shiftBy & 3) * B6 | shiftOp * B4 | rm.code();
682 emit_thumb(i1);
683 }
684
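The two emit_thumb() calls above split one 32-bit Thumb-2 data-processing instruction into its two 16-bit halfwords. A sketch of the field packing as DataProcessingImm assembles it (the concrete example is worked out by hand from that code, not taken from an assembler listing):

  // halfword 0: 1111 0 i 0 oooo S nnnn   (i = imm[11], oooo = opcode bits, S = set flags, nnnn = Rn)
  // halfword 1: 0 iii dddd iiiiiiii      (iii = imm[10:8], dddd = Rd, iiiiiiii = imm[7:0])
  // e.g. add r0, r1, #1 (S clear) packs to the halfwords 0xF101 and 0x0001.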
685 void Assembler::addrmod1(InstrArm instr,
633 Register rn, 686 Register rn,
634 Register rd, 687 Register rd,
635 const Operand& x) { 688 const Operand& x) {
636 CheckBuffer(); 689 CheckBuffer();
637 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); 690 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
638 if (!x.rm_.is_valid()) { 691 if (!x.rm_.is_valid()) {
639 // Immediate. 692 // Immediate.
640 uint32_t rotate_imm; 693 uint32_t rotate_imm;
641 uint32_t immed_8; 694 uint32_t immed_8;
642 if (MustUseIp(x.rmode_) || 695 if (MustUseIp(x.rmode_) ||
643 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { 696 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
644 // The immediate operand cannot be encoded as a shifter operand, so load 697 // The immediate operand cannot be encoded as a shifter operand, so load
645 // it first to register ip and change the original instruction to use ip. 698 // it first to register ip and change the original instruction to use ip.
646 // However, if the original instruction is a 'mov rd, x' (not setting the 699 // However, if the original instruction is a 'mov rd, x' (not setting the
647 // condition code), then replace it with a 'ldr rd, [pc]'. 700 // condition code), then replace it with a 'ldr rd, [pc]'.
648 RecordRelocInfo(x.rmode_, x.imm32_); 701 RecordRelocInfo(x.rmode_, x.imm32_);
649 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed 702 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
650 Condition cond = static_cast<Condition>(instr & CondMask); 703 Condition cond = static_cast<Condition>(instr & CondMask);
651 if ((instr & ~CondMask) == 13*B21) { // mov, S not set 704 if ((instr & ~CondMask) == 13*B21) { // mov, S not set
652 ldr(rd, MemOperand(pc, 0), cond); 705 ldr(rd, MemOperand(pc, 0), cond);
653 } else { 706 } else {
654 ldr(ip, MemOperand(pc, 0), cond); 707 ldr(ip, MemOperand(pc, 0), cond);
655 addrmod1(instr, rn, rd, Operand(ip)); 708 addrmod1(instr, rn, rd, Operand(ip));
656 } 709 }
657 return; 710 return;
658 } 711 }
659 instr |= I | rotate_imm*B8 | immed_8; 712 instr |= I | rotate_imm*B8 | immed_8;
660 } else if (!x.rs_.is_valid()) { 713 } else if (!x.rs_.is_valid()) {
661 // Immediate shift. 714 // Immediate shift.
662 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); 715 instr |= x.shift_imm_*B7 | x.shift_op_*B5 | x.rm_.code();
663 } else { 716 } else {
664 // Register shift. 717 // Register shift.
665 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); 718 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
666 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); 719 instr |= x.rs_.code()*B8 | x.shift_op_*B5 | B4 | x.rm_.code();
667 } 720 }
668 emit(instr | rn.code()*B16 | rd.code()*B12); 721 emit_arm(instr | rn.code()*B16 | rd.code()*B12);
669 if (rn.is(pc) || x.rm_.is(pc)) 722 if (rn.is(pc) || x.rm_.is(pc))
670 // Block constant pool emission for one instruction after reading pc. 723 // Block constant pool emission for one instruction after reading pc.
671 BlockConstPoolBefore(pc_offset() + kInstrSize); 724 BlockConstPoolBefore(pc_offset() + kInstrArmSize);
672 } 725 }
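To restate the immediate fallback in addrmod1 above with a concrete (illustrative) case: a constant like 0x12345678 fails fits_shifter(), so the value is recorded as relocation data and fetched from the constant pool instead.

  // mov(r0, Operand(0x12345678)) cannot encode the immediate, so addrmod1 emits
  //   ldr r0, [pc, #0]        ; offset patched when the constant pool is emitted
  // For any other opcode, or mov with S set, the value is loaded into ip first and
  // the original instruction is re-issued with Operand(ip).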
673 726
674 727
675 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { 728 void Assembler::addrmod2(InstrArm instr, Register rd, const MemOperand& x) {
676 ASSERT((instr & ~(CondMask | B | L)) == B26); 729 ASSERT((instr & ~(CondMask | B | L)) == B26);
677 int am = x.am_; 730 int am = x.am_;
678 if (!x.rm_.is_valid()) { 731 if (!x.rm_.is_valid()) {
679 // Immediate offset. 732 // Immediate offset.
680 int offset_12 = x.offset_; 733 int offset_12 = x.offset_;
681 if (offset_12 < 0) { 734 if (offset_12 < 0) {
682 offset_12 = -offset_12; 735 offset_12 = -offset_12;
683 am ^= U; 736 am ^= U;
684 } 737 }
685 if (!is_uint12(offset_12)) { 738 if (!is_uint12(offset_12)) {
686 // Immediate offset cannot be encoded, load it first to register ip 739 // Immediate offset cannot be encoded, load it first to register ip
687 // rn (and rd in a load) should never be ip, or will be trashed. 740 // rn (and rd in a load) should never be ip, or will be trashed.
688 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); 741 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
689 mov(ip, Operand(x.offset_), LeaveCC, 742 mov(ip, Operand(x.offset_), LeaveCC,
690 static_cast<Condition>(instr & CondMask)); 743 static_cast<Condition>(instr & CondMask));
691 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); 744 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
692 return; 745 return;
693 } 746 }
694 ASSERT(offset_12 >= 0); // no masking needed 747 ASSERT(offset_12 >= 0); // no masking needed
695 instr |= offset_12; 748 instr |= offset_12;
696 } else { 749 } else {
697 // Register offset (shift_imm_ and shift_op_ are 0) or scaled 750 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
698 // register offset; the constructors make sure that both shift_imm_ 751 // register offset; the constructors make sure that both shift_imm_
699 // and shift_op_ are initialized. 752 // and shift_op_ are initialized.
700 ASSERT(!x.rm_.is(pc)); 753 ASSERT(!x.rm_.is(pc));
701 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); 754 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
702 } 755 }
703 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback 756 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
704 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); 757 emit_arm(instr | am | x.rn_.code()*B16 | rd.code()*B12);
705 } 758 }
706 759
707 760
708 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { 761 void Assembler::addrmod3(InstrArm instr, Register rd, const MemOperand& x) {
709 ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7)); 762 ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
710 ASSERT(x.rn_.is_valid()); 763 ASSERT(x.rn_.is_valid());
711 int am = x.am_; 764 int am = x.am_;
712 if (!x.rm_.is_valid()) { 765 if (!x.rm_.is_valid()) {
713 // Immediate offset. 766 // Immediate offset.
714 int offset_8 = x.offset_; 767 int offset_8 = x.offset_;
715 if (offset_8 < 0) { 768 if (offset_8 < 0) {
716 offset_8 = -offset_8; 769 offset_8 = -offset_8;
717 am ^= U; 770 am ^= U;
718 } 771 }
(...skipping 15 matching lines...)
734 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, 787 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
735 static_cast<Condition>(instr & CondMask)); 788 static_cast<Condition>(instr & CondMask));
736 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); 789 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
737 return; 790 return;
738 } else { 791 } else {
739 // Register offset. 792 // Register offset.
740 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback 793 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
741 instr |= x.rm_.code(); 794 instr |= x.rm_.code();
742 } 795 }
743 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback 796 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
744 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); 797 emit_arm(instr | am | x.rn_.code()*B16 | rd.code()*B12);
745 } 798 }
746 799
747 800
748 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { 801 void Assembler::addrmod4(InstrArm instr, Register rn, RegList rl) {
749 ASSERT((instr & ~(CondMask | P | U | W | L)) == B27); 802 ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
750 ASSERT(rl != 0); 803 ASSERT(rl != 0);
751 ASSERT(!rn.is(pc)); 804 ASSERT(!rn.is(pc));
752 emit(instr | rn.code()*B16 | rl); 805 emit_arm(instr | rn.code()*B16 | rl);
753 } 806 }
754 807
755 808
756 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { 809 void Assembler::addrmod5(InstrArm instr, CRegister crd, const MemOperand& x) {
757 // Unindexed addressing is not encoded by this function. 810 // Unindexed addressing is not encoded by this function.
758 ASSERT_EQ((B27 | B26), 811 ASSERT_EQ((B27 | B26),
759 (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L))); 812 (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
760 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); 813 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
761 int am = x.am_; 814 int am = x.am_;
762 int offset_8 = x.offset_; 815 int offset_8 = x.offset_;
763 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset 816 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
764 offset_8 >>= 2; 817 offset_8 >>= 2;
765 if (offset_8 < 0) { 818 if (offset_8 < 0) {
766 offset_8 = -offset_8; 819 offset_8 = -offset_8;
767 am ^= U; 820 am ^= U;
768 } 821 }
769 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte 822 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
770 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback 823 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
771 824
772 // Post-indexed addressing requires W == 1; different than in addrmod2/3. 825 // Post-indexed addressing requires W == 1; different than in addrmod2/3.
773 if ((am & P) == 0) 826 if ((am & P) == 0)
774 am |= W; 827 am |= W;
775 828
776 ASSERT(offset_8 >= 0); // no masking needed 829 ASSERT(offset_8 >= 0); // no masking needed
777 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8); 830 emit_arm(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
778 } 831 }
779 832
780 833
781 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { 834 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
782 int target_pos; 835 int target_pos;
783 if (L->is_bound()) { 836 if (L->is_bound()) {
784 target_pos = L->pos(); 837 target_pos = L->pos();
785 } else { 838 } else {
786 if (L->is_linked()) { 839 if (L->is_linked()) {
787 target_pos = L->pos(); // L's link 840 target_pos = L->pos(); // L's link
788 } else { 841 } else {
789 target_pos = kEndOfChain; 842 target_pos = kEndOfChain;
790 } 843 }
791 L->link_to(pc_offset()); 844 L->link_to(pc_offset());
792 } 845 }
793 846
794 // Block the emission of the constant pool, since the branch instruction must 847 // Block the emission of the constant pool, since the branch instruction must
795 // be emitted at the pc offset recorded by the label. 848 // be emitted at the pc offset recorded by the label.
796 BlockConstPoolBefore(pc_offset() + kInstrSize); 849 BlockConstPoolBefore(pc_offset() + kInstrArmSize);
797 return target_pos - (pc_offset() + kPcLoadDelta); 850 return target_pos - (pc_offset() + kPcLoadDelta);
798 } 851 }
799 852
800 853
801 void Assembler::label_at_put(Label* L, int at_offset) { 854 void Assembler::label_at_put(Label* L, int at_offset) {
802 int target_pos; 855 int target_pos;
803 if (L->is_bound()) { 856 if (L->is_bound()) {
804 target_pos = L->pos(); 857 target_pos = L->pos();
805 } else { 858 } else {
806 if (L->is_linked()) { 859 if (L->is_linked()) {
807 target_pos = L->pos(); // L's link 860 target_pos = L->pos(); // L's link
808 } else { 861 } else {
809 target_pos = kEndOfChain; 862 target_pos = kEndOfChain;
810 } 863 }
811 L->link_to(at_offset); 864 L->link_to(at_offset);
812 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 865 instr_arm_at_put(at_offset, target_pos +
866 (Code::kHeaderSize - kHeapObjectTag));
813 } 867 }
814 } 868 }
815 869
816 870
817 // Branch instructions. 871 // Branch instructions.
818 void Assembler::b(int branch_offset, Condition cond) { 872 void Assembler::b(int branch_offset, Condition cond) {
819 ASSERT((branch_offset & 3) == 0); 873 ASSERT((branch_offset & 3) == 0);
820 int imm24 = branch_offset >> 2; 874 int imm24 = branch_offset >> 2;
821 ASSERT(is_int24(imm24)); 875 ASSERT(is_int24(imm24));
822 emit(cond | B27 | B25 | (imm24 & Imm24Mask)); 876 emit_arm(cond | B27 | B25 | (imm24 & Imm24Mask));
823 877
824 if (cond == al) 878 if (cond == al)
825 // Dead code is a good location to emit the constant pool. 879 // Dead code is a good location to emit the constant pool.
826 CheckConstPool(false, false); 880 CheckConstPool(false, false);
827 } 881 }
828 882
829 883
830 void Assembler::bl(int branch_offset, Condition cond) { 884 void Assembler::bl(int branch_offset, Condition cond) {
831 ASSERT((branch_offset & 3) == 0); 885 ASSERT((branch_offset & 3) == 0);
832 int imm24 = branch_offset >> 2; 886 int imm24 = branch_offset >> 2;
833 ASSERT(is_int24(imm24)); 887 ASSERT(is_int24(imm24));
834 emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask)); 888 emit_arm(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
835 } 889 }
836 890
837 891
838 void Assembler::blx(int branch_offset) { // v5 and above 892 void Assembler::blx(int branch_offset) { // v5 and above
839 WriteRecordedPositions(); 893 WriteRecordedPositions();
840 ASSERT((branch_offset & 1) == 0); 894 ASSERT((branch_offset & 1) == 0);
841 int h = ((branch_offset & 2) >> 1)*B24; 895 int h = ((branch_offset & 2) >> 1)*B24;
842 int imm24 = branch_offset >> 2; 896 int imm24 = branch_offset >> 2;
843 ASSERT(is_int24(imm24)); 897 ASSERT(is_int24(imm24));
844 emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask)); 898 emit_arm(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
845 } 899 }
846 900
847 901
848 void Assembler::blx(Register target, Condition cond) { // v5 and above 902 void Assembler::blx(Register target, Condition cond) { // v5 and above
849 WriteRecordedPositions(); 903 WriteRecordedPositions();
850 ASSERT(!target.is(pc)); 904 ASSERT(!target.is(pc));
851 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); 905 emit_arm(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
852 } 906 }
853 907
854 908
855 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t 909 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
856 WriteRecordedPositions(); 910 WriteRecordedPositions();
857 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged 911 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
858 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); 912 emit_arm(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
859 } 913 }
860 914
861 915
862 // Data-processing instructions. 916 // Data-processing instructions.
863 917
864 // UBFX <Rd>,<Rn>,#<lsb>,#<width - 1> 918 // UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
865 // Instruction details available in ARM DDI 0406A, A8-464. 919 // Instruction details available in ARM DDI 0406A, A8-464.
866 // cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) | 920 // cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
867 // Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0) 921 // Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
868 void Assembler::ubfx(Register dst, Register src1, const Operand& src2, 922 void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
869 const Operand& src3, Condition cond) { 923 const Operand& src3, Condition cond) {
870 ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid()); 924 ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
871 ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f); 925 ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
872 ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f); 926 ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
873 emit(cond | 0x3F*B21 | src3.imm32_*B16 | 927 emit_arm(cond | 0x3F*B21 | src3.imm32_*B16 |
874 dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code()); 928 dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
875 } 929 }
876 930
877 931
878 void Assembler::and_(Register dst, Register src1, const Operand& src2, 932 void Assembler::and_(Register dst, Register src1, const Operand& src2,
879 SBit s, Condition cond) { 933 SBit s, Condition cond) {
880 addrmod1(cond | 0*B21 | s, src1, dst, src2); 934 DataProcessing(cond, AND, s, src1, dst, src2);
881 } 935 }
882 936
883 937
884 void Assembler::eor(Register dst, Register src1, const Operand& src2, 938 void Assembler::eor(Register dst, Register src1, const Operand& src2,
885 SBit s, Condition cond) { 939 SBit s, Condition cond) {
886 addrmod1(cond | 1*B21 | s, src1, dst, src2); 940 addrmod1(cond | 1*B21 | s, src1, dst, src2);
887 } 941 }
888 942
889 943
890 void Assembler::sub(Register dst, Register src1, const Operand& src2, 944 void Assembler::sub(Register dst, Register src1, const Operand& src2,
891 SBit s, Condition cond) { 945 SBit s, Condition cond) {
892 addrmod1(cond | 2*B21 | s, src1, dst, src2); 946 addrmod1(cond | 2*B21 | s, src1, dst, src2);
893 } 947 }
894 948
895 949
896 void Assembler::rsb(Register dst, Register src1, const Operand& src2, 950 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
897 SBit s, Condition cond) { 951 SBit s, Condition cond) {
898 addrmod1(cond | 3*B21 | s, src1, dst, src2); 952 addrmod1(cond | 3*B21 | s, src1, dst, src2);
899 } 953 }
900 954
901 955
902 void Assembler::add(Register dst, Register src1, const Operand& src2, 956 void Assembler::add(Register dst, Register src1, const Operand& src2,
903 SBit s, Condition cond) { 957 SBit s, Condition cond) {
904 addrmod1(cond | 4*B21 | s, src1, dst, src2); 958 DataProcessing(cond, ADD, s, src1, dst, src2);
905 959 // TODO(haustein): Eliminate pattern: push(r), pop()
906 // Eliminate pattern: push(r), pop()
907 // str(src, MemOperand(sp, 4, NegPreIndex), al); 960 // str(src, MemOperand(sp, 4, NegPreIndex), al);
908 // add(sp, sp, Operand(kPointerSize)); 961 // add(sp, sp, Operand(kPointerSize));
909 // Both instructions can be eliminated. 962 // Both instructions can be eliminated, as in assembler-arm.cc
910 int pattern_size = 2 * kInstrSize;
911 if (FLAG_push_pop_elimination &&
912 last_bound_pos_ <= (pc_offset() - pattern_size) &&
913 reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
914 // Pattern.
915 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
916 (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
917 pc_ -= 2 * kInstrSize;
918 if (FLAG_print_push_pop_elimination) {
919 PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
920 }
921 }
922 } 963 }
923 964
924 965
925 void Assembler::adc(Register dst, Register src1, const Operand& src2, 966 void Assembler::adc(Register dst, Register src1, const Operand& src2,
926 SBit s, Condition cond) { 967 SBit s, Condition cond) {
927 addrmod1(cond | 5*B21 | s, src1, dst, src2); 968 addrmod1(cond | 5*B21 | s, src1, dst, src2);
928 } 969 }
929 970
930 971
931 void Assembler::sbc(Register dst, Register src1, const Operand& src2, 972 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
(...skipping 50 matching lines...)
982 1023
983 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { 1024 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
984 addrmod1(cond | 15*B21 | s, r0, dst, src); 1025 addrmod1(cond | 15*B21 | s, r0, dst, src);
985 } 1026 }
986 1027
987 1028
988 // Multiply instructions. 1029 // Multiply instructions.
989 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, 1030 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
990 SBit s, Condition cond) { 1031 SBit s, Condition cond) {
991 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); 1032 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
992 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | 1033 emit_arm(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
993 src2.code()*B8 | B7 | B4 | src1.code()); 1034 src2.code()*B8 | B7 | B4 | src1.code());
994 } 1035 }
995 1036
996 1037
997 void Assembler::mul(Register dst, Register src1, Register src2, 1038 void Assembler::mul(Register dst, Register src1, Register src2,
998 SBit s, Condition cond) { 1039 SBit s, Condition cond) {
999 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); 1040 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1000 // dst goes in bits 16-19 for this instruction! 1041 // dst goes in bits 16-19 for this instruction!
1001 emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); 1042 emit_arm(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
1002 } 1043 }
1003 1044
1004 1045
1005 void Assembler::smlal(Register dstL, 1046 void Assembler::smlal(Register dstL,
1006 Register dstH, 1047 Register dstH,
1007 Register src1, 1048 Register src1,
1008 Register src2, 1049 Register src2,
1009 SBit s, 1050 SBit s,
1010 Condition cond) { 1051 Condition cond) {
1011 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); 1052 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1012 ASSERT(!dstL.is(dstH)); 1053 ASSERT(!dstL.is(dstH));
1013 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | 1054 emit_arm(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1014 src2.code()*B8 | B7 | B4 | src1.code()); 1055 src2.code()*B8 | B7 | B4 | src1.code());
1015 } 1056 }
1016 1057
1017 1058
1018 void Assembler::smull(Register dstL, 1059 void Assembler::smull(Register dstL,
1019 Register dstH, 1060 Register dstH,
1020 Register src1, 1061 Register src1,
1021 Register src2, 1062 Register src2,
1022 SBit s, 1063 SBit s,
1023 Condition cond) { 1064 Condition cond) {
1024 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); 1065 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1025 ASSERT(!dstL.is(dstH)); 1066 ASSERT(!dstL.is(dstH));
1026 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 | 1067 emit_arm(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1027 src2.code()*B8 | B7 | B4 | src1.code()); 1068 src2.code()*B8 | B7 | B4 | src1.code());
1028 } 1069 }
1029 1070
1030 1071
1031 void Assembler::umlal(Register dstL, 1072 void Assembler::umlal(Register dstL,
1032 Register dstH, 1073 Register dstH,
1033 Register src1, 1074 Register src1,
1034 Register src2, 1075 Register src2,
1035 SBit s, 1076 SBit s,
1036 Condition cond) { 1077 Condition cond) {
1037 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); 1078 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1038 ASSERT(!dstL.is(dstH)); 1079 ASSERT(!dstL.is(dstH));
1039 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 | 1080 emit_arm(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1040 src2.code()*B8 | B7 | B4 | src1.code()); 1081 src2.code()*B8 | B7 | B4 | src1.code());
1041 } 1082 }
1042 1083
1043 1084
1044 void Assembler::umull(Register dstL, 1085 void Assembler::umull(Register dstL,
1045 Register dstH, 1086 Register dstH,
1046 Register src1, 1087 Register src1,
1047 Register src2, 1088 Register src2,
1048 SBit s, 1089 SBit s,
1049 Condition cond) { 1090 Condition cond) {
1050 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); 1091 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1051 ASSERT(!dstL.is(dstH)); 1092 ASSERT(!dstL.is(dstH));
1052 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | 1093 emit_arm(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1053 src2.code()*B8 | B7 | B4 | src1.code()); 1094 src2.code()*B8 | B7 | B4 | src1.code());
1054 } 1095 }
1055 1096
1056 1097
1057 // Miscellaneous arithmetic instructions. 1098 // Miscellaneous arithmetic instructions.
1058 void Assembler::clz(Register dst, Register src, Condition cond) { 1099 void Assembler::clz(Register dst, Register src, Condition cond) {
1059 // v5 and above. 1100 // v5 and above.
1060 ASSERT(!dst.is(pc) && !src.is(pc)); 1101 ASSERT(!dst.is(pc) && !src.is(pc));
1061 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | 1102 emit_arm(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1062 15*B8 | B4 | src.code()); 1103 15*B8 | B4 | src.code());
1063 } 1104 }
1064 1105
1065 1106
1066 // Status register access instructions. 1107 // Status register access instructions.
1067 void Assembler::mrs(Register dst, SRegister s, Condition cond) { 1108 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1068 ASSERT(!dst.is(pc)); 1109 ASSERT(!dst.is(pc));
1069 emit(cond | B24 | s | 15*B16 | dst.code()*B12); 1110 emit_arm(cond | B24 | s | 15*B16 | dst.code()*B12);
1070 } 1111 }
1071 1112
1072 1113
1073 void Assembler::msr(SRegisterFieldMask fields, const Operand& src, 1114 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1074 Condition cond) { 1115 Condition cond) {
1075 ASSERT(fields >= B16 && fields < B20); // at least one field set 1116 ASSERT(fields >= B16 && fields < B20); // at least one field set
1076 Instr instr; 1117 InstrArm instr;
1077 if (!src.rm_.is_valid()) { 1118 if (!src.rm_.is_valid()) {
1078 // Immediate. 1119 // Immediate.
1079 uint32_t rotate_imm; 1120 uint32_t rotate_imm;
1080 uint32_t immed_8; 1121 uint32_t immed_8;
1081 if (MustUseIp(src.rmode_) || 1122 if (MustUseIp(src.rmode_) ||
1082 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { 1123 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1083 // Immediate operand cannot be encoded, load it first to register ip. 1124 // Immediate operand cannot be encoded, load it first to register ip.
1084 RecordRelocInfo(src.rmode_, src.imm32_); 1125 RecordRelocInfo(src.rmode_, src.imm32_);
1085 ldr(ip, MemOperand(pc, 0), cond); 1126 ldr(ip, MemOperand(pc, 0), cond);
1086 msr(fields, Operand(ip), cond); 1127 msr(fields, Operand(ip), cond);
1087 return; 1128 return;
1088 } 1129 }
1089 instr = I | rotate_imm*B8 | immed_8; 1130 instr = I | rotate_imm*B8 | immed_8;
1090 } else { 1131 } else {
1091 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed 1132 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
1092 instr = src.rm_.code(); 1133 instr = src.rm_.code();
1093 } 1134 }
1094 emit(cond | instr | B24 | B21 | fields | 15*B12); 1135 emit_arm(cond | instr | B24 | B21 | fields | 15*B12);
1095 } 1136 }
1096 1137
1097 1138
1098 // Load/Store instructions. 1139 // Load/Store instructions.
1099 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { 1140 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1100 if (dst.is(pc)) { 1141 if (dst.is(pc)) {
1101 WriteRecordedPositions(); 1142 WriteRecordedPositions();
1102 } 1143 }
1103 addrmod2(cond | B26 | L, dst, src); 1144 addrmod2(cond | B26 | L, dst, src);
1104 1145
1105 // Eliminate pattern: push(r), pop(r) 1146 // Eliminate pattern: push(r), pop(r)
1106 // str(r, MemOperand(sp, 4, NegPreIndex), al) 1147 // str(r, MemOperand(sp, 4, NegPreIndex), al)
1107 // ldr(r, MemOperand(sp, 4, PostIndex), al) 1148 // ldr(r, MemOperand(sp, 4, PostIndex), al)
1108 // Both instructions can be eliminated. 1149 // Both instructions can be eliminated.
1109 int pattern_size = 2 * kInstrSize; 1150 int pattern_size = 2 * kInstrArmSize;
1110 if (FLAG_push_pop_elimination && 1151 if (FLAG_push_pop_elimination &&
1111 last_bound_pos_ <= (pc_offset() - pattern_size) && 1152 last_bound_pos_ <= (pc_offset() - pattern_size) &&
1112 reloc_info_writer.last_pc() <= (pc_ - pattern_size) && 1153 reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
1113 // Pattern. 1154 // Pattern.
1114 instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) && 1155 instr_arm_at(pc_ - 1 * kInstrArmSize) ==
1115 instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) { 1156 (kPopRegPattern | dst.code() * B12) &&
1116 pc_ -= 2 * kInstrSize; 1157 instr_arm_at(pc_ - 2 * kInstrArmSize) ==
1158 (kPushRegPattern | dst.code() * B12)) {
1159 pc_ -= 2 * kInstrArmSize;
1117 if (FLAG_print_push_pop_elimination) { 1160 if (FLAG_print_push_pop_elimination) {
1118 PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); 1161 PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
1119 } 1162 }
1120 } 1163 }
1121 } 1164 }
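The peephole above in action (illustrative): a push immediately followed by a pop of the same register leaves both the stack and the register unchanged, so both ARM words are dropped by rewinding pc_.

  // str r1, [sp, #-4]!   ; push(r1)  == kPushRegPattern | r1.code()*B12
  // ldr r1, [sp], #4     ; pop(r1)   == kPopRegPattern  | r1.code()*B12
  // -> pc_ -= 2 * kInstrArmSize; both instructions are removed.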
1122 1165
1123 1166
1124 void Assembler::str(Register src, const MemOperand& dst, Condition cond) { 1167 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1125 addrmod2(cond | B26, src, dst); 1168 addrmod2(cond | B26, src, dst);
1126 1169
1127 // Eliminate pattern: pop(), push(r) 1170 // Eliminate pattern: pop(), push(r)
1128 // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al 1171 // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
1129 // -> str r, [sp, 0], al 1172 // -> str r, [sp, 0], al
1130 int pattern_size = 2 * kInstrSize; 1173 int pattern_size = 2 * kInstrArmSize;
1131 if (FLAG_push_pop_elimination && 1174 if (FLAG_push_pop_elimination &&
1132 last_bound_pos_ <= (pc_offset() - pattern_size) && 1175 last_bound_pos_ <= (pc_offset() - pattern_size) &&
1133 reloc_info_writer.last_pc() <= (pc_ - pattern_size) && 1176 reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
1134 // Pattern. 1177 // Pattern.
1135 instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) && 1178 instr_arm_at(pc_ - 1 * kInstrArmSize) ==
1136 instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) { 1179 (kPushRegPattern | src.code() * B12) &&
1137 pc_ -= 2 * kInstrSize; 1180 instr_arm_at(pc_ - 2 * kInstrArmSize) == kPopInstruction) {
1138 emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12); 1181 pc_ -= 2 * kInstrArmSize;
1182 emit_arm(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
1139 if (FLAG_print_push_pop_elimination) { 1183 if (FLAG_print_push_pop_elimination) {
1140 PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); 1184 PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
1141 } 1185 }
1142 } 1186 }
1143 } 1187 }
1144 1188
1145 1189
1146 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { 1190 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1147 addrmod2(cond | B26 | B | L, dst, src); 1191 addrmod2(cond | B26 | B | L, dst, src);
1148 } 1192 }
(...skipping 34 matching lines...)
1183 1227
1184 addrmod4(cond | B27 | am | L, base, dst); 1228 addrmod4(cond | B27 | am | L, base, dst);
1185 1229
1186 // Emit the constant pool after a function return implemented by ldm ..{..pc}. 1230 // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1187 if (cond == al && (dst & pc.bit()) != 0) { 1231 if (cond == al && (dst & pc.bit()) != 0) {
1188 // There is a slight chance that the ldm instruction was actually a call, 1232 // There is a slight chance that the ldm instruction was actually a call,
1189 // in which case it would be wrong to return into the constant pool; we 1233 // in which case it would be wrong to return into the constant pool; we
1190 // recognize this case by checking if the emission of the pool was blocked 1234 // recognize this case by checking if the emission of the pool was blocked
1191 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is 1235 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1192 // the case, we emit a jump over the pool. 1236 // the case, we emit a jump over the pool.
1193 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize); 1237 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrArmSize);
1194 } 1238 }
1195 } 1239 }
1196 1240
1197 1241
1198 void Assembler::stm(BlockAddrMode am, 1242 void Assembler::stm(BlockAddrMode am,
1199 Register base, 1243 Register base,
1200 RegList src, 1244 RegList src,
1201 Condition cond) { 1245 Condition cond) {
1202 addrmod4(cond | B27 | am, base, src); 1246 addrmod4(cond | B27 | am, base, src);
1203 } 1247 }
1204 1248
1205 1249
1206 // Semaphore instructions. 1250 // Semaphore instructions.
1207 void Assembler::swp(Register dst, Register src, Register base, Condition cond) { 1251 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
1208 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); 1252 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1209 ASSERT(!dst.is(base) && !src.is(base)); 1253 ASSERT(!dst.is(base) && !src.is(base));
1210 emit(cond | P | base.code()*B16 | dst.code()*B12 | 1254 emit_arm(cond | P | base.code()*B16 | dst.code()*B12 |
1211 B7 | B4 | src.code()); 1255 B7 | B4 | src.code());
1212 } 1256 }
1213 1257
1214 1258
1215 void Assembler::swpb(Register dst, 1259 void Assembler::swpb(Register dst,
1216 Register src, 1260 Register src,
1217 Register base, 1261 Register base,
1218 Condition cond) { 1262 Condition cond) {
1219 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); 1263 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1220 ASSERT(!dst.is(base) && !src.is(base)); 1264 ASSERT(!dst.is(base) && !src.is(base));
1221 emit(cond | P | B | base.code()*B16 | dst.code()*B12 | 1265 emit_arm(cond | P | B | base.code()*B16 | dst.code()*B12 |
1222 B7 | B4 | src.code()); 1266 B7 | B4 | src.code());
1223 } 1267 }
1224 1268
1225 1269
1226 // Exception-generating instructions and debugging support. 1270 // Exception-generating instructions and debugging support.
1227 void Assembler::stop(const char* msg) { 1271 void Assembler::stop(const char* msg) {
1228 #if !defined(__arm__) 1272 #if !defined(__arm__)
1229 // The simulator handles these special instructions and stops execution. 1273 // The simulator handles these special instructions and stops execution.
1230 emit(15 << 28 | ((intptr_t) msg)); 1274 emit_arm(15 << 28 | ((intptr_t) msg));
1231 #else 1275 #else
1232 // Just issue a simple break instruction for now. Alternatively we could use 1276 // Just issue a simple break instruction for now. Alternatively we could use
1233 // the swi(0x9f0001) instruction on Linux. 1277 // the swi(0x9f0001) instruction on Linux.
1234 bkpt(0); 1278 bkpt(0);
1235 #endif 1279 #endif
1236 } 1280 }
1237 1281
1238 1282
1239 void Assembler::bkpt(uint32_t imm16) { // v5 and above 1283 void Assembler::bkpt(uint32_t imm16) { // v5 and above
1240 ASSERT(is_uint16(imm16)); 1284 ASSERT(is_uint16(imm16));
1241 emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf)); 1285 emit_arm(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
1242 } 1286 }
1243 1287
1244 1288
1245 void Assembler::swi(uint32_t imm24, Condition cond) { 1289 void Assembler::swi(uint32_t imm24, Condition cond) {
1246 ASSERT(is_uint24(imm24)); 1290 ASSERT(is_uint24(imm24));
1247 emit(cond | 15*B24 | imm24); 1291 emit_arm(cond | 15*B24 | imm24);
1248 } 1292 }
1249 1293
1250 1294
1251 // Coprocessor instructions. 1295 // Coprocessor instructions.
1252 void Assembler::cdp(Coprocessor coproc, 1296 void Assembler::cdp(Coprocessor coproc,
1253 int opcode_1, 1297 int opcode_1,
1254 CRegister crd, 1298 CRegister crd,
1255 CRegister crn, 1299 CRegister crn,
1256 CRegister crm, 1300 CRegister crm,
1257 int opcode_2, 1301 int opcode_2,
1258 Condition cond) { 1302 Condition cond) {
1259 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); 1303 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1260 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | 1304 emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1261 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); 1305 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1262 } 1306 }
1263 1307
1264 1308
1265 void Assembler::cdp2(Coprocessor coproc, 1309 void Assembler::cdp2(Coprocessor coproc,
1266 int opcode_1, 1310 int opcode_1,
1267 CRegister crd, 1311 CRegister crd,
1268 CRegister crn, 1312 CRegister crn,
1269 CRegister crm, 1313 CRegister crm,
1270 int opcode_2) { // v5 and above 1314 int opcode_2) { // v5 and above
1271 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv)); 1315 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
1272 } 1316 }
1273 1317
1274 1318
1275 void Assembler::mcr(Coprocessor coproc, 1319 void Assembler::mcr(Coprocessor coproc,
1276 int opcode_1, 1320 int opcode_1,
1277 Register rd, 1321 Register rd,
1278 CRegister crn, 1322 CRegister crn,
1279 CRegister crm, 1323 CRegister crm,
1280 int opcode_2, 1324 int opcode_2,
1281 Condition cond) { 1325 Condition cond) {
1282 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); 1326 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1283 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | 1327 emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
1284 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); 1328 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1285 } 1329 }
1286 1330
1287 1331
1288 void Assembler::mcr2(Coprocessor coproc, 1332 void Assembler::mcr2(Coprocessor coproc,
1289 int opcode_1, 1333 int opcode_1,
1290 Register rd, 1334 Register rd,
1291 CRegister crn, 1335 CRegister crn,
1292 CRegister crm, 1336 CRegister crm,
1293 int opcode_2) { // v5 and above 1337 int opcode_2) { // v5 and above
1294 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv)); 1338 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1295 } 1339 }
1296 1340
1297 1341
1298 void Assembler::mrc(Coprocessor coproc, 1342 void Assembler::mrc(Coprocessor coproc,
1299 int opcode_1, 1343 int opcode_1,
1300 Register rd, 1344 Register rd,
1301 CRegister crn, 1345 CRegister crn,
1302 CRegister crm, 1346 CRegister crm,
1303 int opcode_2, 1347 int opcode_2,
1304 Condition cond) { 1348 Condition cond) {
1305 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); 1349 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1306 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | 1350 emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
1307 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); 1351 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1308 } 1352 }
1309 1353
1310 1354
1311 void Assembler::mrc2(Coprocessor coproc, 1355 void Assembler::mrc2(Coprocessor coproc,
1312 int opcode_1, 1356 int opcode_1,
1313 Register rd, 1357 Register rd,
1314 CRegister crn, 1358 CRegister crn,
1315 CRegister crm, 1359 CRegister crm,
1316 int opcode_2) { // v5 and above 1360 int opcode_2) { // v5 and above
(...skipping 11 matching lines...)
1328 1372
1329 1373
1330 void Assembler::ldc(Coprocessor coproc, 1374 void Assembler::ldc(Coprocessor coproc,
1331 CRegister crd, 1375 CRegister crd,
1332 Register rn, 1376 Register rn,
1333 int option, 1377 int option,
1334 LFlag l, 1378 LFlag l,
1335 Condition cond) { 1379 Condition cond) {
1336 // Unindexed addressing. 1380 // Unindexed addressing.
1337 ASSERT(is_uint8(option)); 1381 ASSERT(is_uint8(option));
1338 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | 1382 emit_arm(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
1339 coproc*B8 | (option & 255)); 1383 coproc*B8 | (option & 255));
1340 } 1384 }
1341 1385
1342 1386
1343 void Assembler::ldc2(Coprocessor coproc, 1387 void Assembler::ldc2(Coprocessor coproc,
1344 CRegister crd, 1388 CRegister crd,
1345 const MemOperand& src, 1389 const MemOperand& src,
1346 LFlag l) { // v5 and above 1390 LFlag l) { // v5 and above
1347 ldc(coproc, crd, src, l, static_cast<Condition>(nv)); 1391 ldc(coproc, crd, src, l, static_cast<Condition>(nv));
1348 } 1392 }
(...skipping 18 matching lines...)
1367 1411
1368 1412
1369 void Assembler::stc(Coprocessor coproc, 1413 void Assembler::stc(Coprocessor coproc,
1370 CRegister crd, 1414 CRegister crd,
1371 Register rn, 1415 Register rn,
1372 int option, 1416 int option,
1373 LFlag l, 1417 LFlag l,
1374 Condition cond) { 1418 Condition cond) {
1375 // Unindexed addressing. 1419 // Unindexed addressing.
1376 ASSERT(is_uint8(option)); 1420 ASSERT(is_uint8(option));
1377 emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | 1421 emit_arm(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
1378 coproc*B8 | (option & 255)); 1422 coproc*B8 | (option & 255));
1379 } 1423 }
1380 1424
1381 1425
1382 void Assembler::stc2(Coprocessor coproc, 1426 void Assembler::stc2(Coprocessor coproc,
1383 CRegister crd, 1427 CRegister crd,
1384 const MemOperand& dst, 1428 const MemOperand& dst,
1385 LFlag l) { // v5 and above 1429 LFlag l) { // v5 and above
1386 stc(coproc, crd, dst, l, static_cast<Condition>(nv)); 1430 stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1387 } 1431 }
(...skipping 12 matching lines...)
1400 void Assembler::vldr(const DwVfpRegister dst, 1444 void Assembler::vldr(const DwVfpRegister dst,
1401 const Register base, 1445 const Register base,
1402 int offset, 1446 int offset,
1403 const Condition cond) { 1447 const Condition cond) {
1404 // Ddst = MEM(Rbase + offset). 1448 // Ddst = MEM(Rbase + offset).
1405 // Instruction details available in ARM DDI 0406A, A8-628. 1449 // Instruction details available in ARM DDI 0406A, A8-628.
1406 // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) | 1450 // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
1407 // Vdst(15-12) | 1011(11-8) | offset 1451 // Vdst(15-12) | 1011(11-8) | offset
1408 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1452 ASSERT(CpuFeatures::IsEnabled(VFP3));
1409 ASSERT(offset % 4 == 0); 1453 ASSERT(offset % 4 == 0);
1410 emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | 1454 emit_arm(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
1411 0xB*B8 | ((offset / 4) & 255)); 1455 0xB*B8 | ((offset / 4) & 255));
1412 } 1456 }
1413 1457
1414 1458
1415 void Assembler::vstr(const DwVfpRegister src, 1459 void Assembler::vstr(const DwVfpRegister src,
1416 const Register base, 1460 const Register base,
1417 int offset, 1461 int offset,
1418 const Condition cond) { 1462 const Condition cond) {
1419 // MEM(Rbase + offset) = Dsrc. 1463 // MEM(Rbase + offset) = Dsrc.
1420 // Instruction details available in ARM DDI 0406A, A8-786. 1464 // Instruction details available in ARM DDI 0406A, A8-786.
1421 // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) | 1465 // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
1422 // Vsrc(15-12) | 1011(11-8) | (offset/4) 1466 // Vsrc(15-12) | 1011(11-8) | (offset/4)
1423 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1467 ASSERT(CpuFeatures::IsEnabled(VFP3));
1424 ASSERT(offset % 4 == 0); 1468 ASSERT(offset % 4 == 0);
1425 emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 | 1469 emit_arm(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
1426 0xB*B8 | ((offset / 4) & 255)); 1470 0xB*B8 | ((offset / 4) & 255));
1427 } 1471 }
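A small standalone sketch of the offset arithmetic used by vldr and vstr above: the byte offset must be word aligned and is stored as offset/4 in the low eight bits, which bounds the reach of a single instruction. The helper is illustrative only and is not part of the assembler.

  #include <cassert>
  #include <cstdio>

  // Illustrative only: mirrors the imm8 field computed by vldr/vstr above.
  static int VfpOffsetField(int offset) {
    assert(offset % 4 == 0);                   // same precondition as the ASSERT above
    assert(offset >= 0 && offset / 4 <= 255);  // imm8 reach: 0..1020 bytes
    return (offset / 4) & 255;
  }

  int main() {
    printf("offset 8    -> imm8 %d\n", VfpOffsetField(8));     // 2
    printf("offset 1020 -> imm8 %d\n", VfpOffsetField(1020));  // 255
    return 0;
  }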
1428 1472
1429 1473
1430 void Assembler::vmov(const DwVfpRegister dst, 1474 void Assembler::vmov(const DwVfpRegister dst,
1431 const Register src1, 1475 const Register src1,
1432 const Register src2, 1476 const Register src2,
1433 const Condition cond) { 1477 const Condition cond) {
1434 // Dm = <Rt,Rt2>. 1478 // Dm = <Rt,Rt2>.
1435 // Instruction details available in ARM DDI 0406A, A8-646. 1479 // Instruction details available in ARM DDI 0406A, A8-646.
1436 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | 1480 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
1437 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm 1481 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
1438 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1482 ASSERT(CpuFeatures::IsEnabled(VFP3));
1439 ASSERT(!src1.is(pc) && !src2.is(pc)); 1483 ASSERT(!src1.is(pc) && !src2.is(pc));
1440 emit(cond | 0xC*B24 | B22 | src2.code()*B16 | 1484 emit_arm(cond | 0xC*B24 | B22 | src2.code()*B16 |
1441 src1.code()*B12 | 0xB*B8 | B4 | dst.code()); 1485 src1.code()*B12 | 0xB*B8 | B4 | dst.code());
1442 } 1486 }
1443 1487
1444 1488
1445 void Assembler::vmov(const Register dst1, 1489 void Assembler::vmov(const Register dst1,
1446 const Register dst2, 1490 const Register dst2,
1447 const DwVfpRegister src, 1491 const DwVfpRegister src,
1448 const Condition cond) { 1492 const Condition cond) {
1449 // <Rt,Rt2> = Dm. 1493 // <Rt,Rt2> = Dm.
1450 // Instruction details available in ARM DDI 0406A, A8-646. 1494 // Instruction details available in ARM DDI 0406A, A8-646.
1451 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | 1495 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
1452 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm 1496 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
1453 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1497 ASSERT(CpuFeatures::IsEnabled(VFP3));
1454 ASSERT(!dst1.is(pc) && !dst2.is(pc)); 1498 ASSERT(!dst1.is(pc) && !dst2.is(pc));
1455 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | 1499 emit_arm(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
1456 dst1.code()*B12 | 0xB*B8 | B4 | src.code()); 1500 dst1.code()*B12 | 0xB*B8 | B4 | src.code());
1457 } 1501 }
1458 1502
1459 1503
1460 void Assembler::vmov(const SwVfpRegister dst, 1504 void Assembler::vmov(const SwVfpRegister dst,
1461 const Register src, 1505 const Register src,
1462 const Condition cond) { 1506 const Condition cond) {
1463 // Sn = Rt. 1507 // Sn = Rt.
1464 // Instruction details available in ARM DDI 0406A, A8-642. 1508 // Instruction details available in ARM DDI 0406A, A8-642.
1465 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | 1509 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
1466 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) 1510 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
1467 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1511 ASSERT(CpuFeatures::IsEnabled(VFP3));
1468 ASSERT(!src.is(pc)); 1512 ASSERT(!src.is(pc));
1469 emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 | 1513 emit_arm(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
1470 src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4); 1514 src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
1471 } 1515 }
1472 1516
1473 1517
1474 void Assembler::vmov(const Register dst, 1518 void Assembler::vmov(const Register dst,
1475 const SwVfpRegister src, 1519 const SwVfpRegister src,
1476 const Condition cond) { 1520 const Condition cond) {
1477 // Rt = Sn. 1521 // Rt = Sn.
1478 // Instruction details available in ARM DDI 0406A, A8-642. 1522 // Instruction details available in ARM DDI 0406A, A8-642.
1479 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | 1523 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
1480 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) 1524 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
1481 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1525 ASSERT(CpuFeatures::IsEnabled(VFP3));
1482 ASSERT(!dst.is(pc)); 1526 ASSERT(!dst.is(pc));
1483 emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 | 1527 emit_arm(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
1484 dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4); 1528 dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
1485 } 1529 }
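Usage sketch (not part of this patch): the two register-pair overloads above are the usual way to move a 64-bit double between core registers and a D register. The helper and the particular registers are assumptions chosen for illustration.

  // Hypothetical helper; assumes Assembler plus the r0..r3 and d0 declarations.
  static void RoundTripDouble(Assembler* masm) {
    masm->vmov(d0, r0, r1);  // d0 = r1:r0 (r0 is the low word, r1 the high word)
    masm->vmov(r2, r3, d0);  // r3:r2 = d0, so r2 gets the low word back
  }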
1486 1530
1487 1531
1488 void Assembler::vcvt(const DwVfpRegister dst, 1532 void Assembler::vcvt(const DwVfpRegister dst,
1489 const SwVfpRegister src, 1533 const SwVfpRegister src,
1490 const Condition cond) { 1534 const Condition cond) {
1491 // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd). 1535 // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
1492 // Instruction details available in ARM DDI 0406A, A8-576. 1536 // Instruction details available in ARM DDI 0406A, A8-576.
1493 // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) | 1537 // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
1494 // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0) 1538 // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
1495 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1539 ASSERT(CpuFeatures::IsEnabled(VFP3));
1496 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 | 1540 emit_arm(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
1497 dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 | 1541 dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
1498 (0x1 & src.code())*B5 | (src.code() >> 1)); 1542 (0x1 & src.code())*B5 | (src.code() >> 1));
1499 } 1543 }
1500 1544
1501 1545
1502 void Assembler::vcvt(const SwVfpRegister dst, 1546 void Assembler::vcvt(const SwVfpRegister dst,
1503 const DwVfpRegister src, 1547 const DwVfpRegister src,
1504 const Condition cond) { 1548 const Condition cond) {
1505 // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd). 1549 // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
1506 // Instruction details available in ARM DDI 0406A, A8-576. 1550 // Instruction details available in ARM DDI 0406A, A8-576.
1507 // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)| 1551 // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
1508 // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0) 1552 // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
1509 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1553 ASSERT(CpuFeatures::IsEnabled(VFP3));
1510 emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 | 1554 emit_arm(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
1511 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 | 1555 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
1512 0x5*B9 | B8 | B7 | B6 | src.code()); 1556 0x5*B9 | B8 | B7 | B6 | src.code());
1513 } 1557 }
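Usage sketch of the conversion pair above (not part of this patch): an integer is first moved bitwise into a single-precision register and then converted to a double; the reverse direction converts and moves back. The helper and the particular registers (r0, s15, d7) are assumptions for illustration.

  // Hypothetical helper; assumes Assembler plus the r0, s15 and d7 declarations.
  static void IntToDoubleAndBack(Assembler* masm) {
    masm->vmov(s15, r0);   // copy the raw integer bits into s15
    masm->vcvt(d7, s15);   // signed 32-bit integer in s15 -> double in d7
    masm->vcvt(s15, d7);   // double in d7 -> signed 32-bit integer in s15
    masm->vmov(r0, s15);   // copy the integer bits back into r0
  }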
1514 1558
1515 1559
1516 void Assembler::vadd(const DwVfpRegister dst, 1560 void Assembler::vadd(const DwVfpRegister dst,
1517 const DwVfpRegister src1, 1561 const DwVfpRegister src1,
1518 const DwVfpRegister src2, 1562 const DwVfpRegister src2,
1519 const Condition cond) { 1563 const Condition cond) {
1520 // Dd = vadd(Dn, Dm) double precision floating point addition. 1564 // Dd = vadd(Dn, Dm) double precision floating point addition.
1521 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn. 1565 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
1522 // Instruction details available in ARM DDI 0406A, A8-536. 1566 // Instruction details available in ARM DDI 0406A, A8-536.
1523 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | 1567 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
1524 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) 1568 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
1525 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1569 ASSERT(CpuFeatures::IsEnabled(VFP3));
1526 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | 1570 emit_arm(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
1527 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 1571 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
1528 } 1572 }
1529 1573
1530 1574
1531 void Assembler::vsub(const DwVfpRegister dst, 1575 void Assembler::vsub(const DwVfpRegister dst,
1532 const DwVfpRegister src1, 1576 const DwVfpRegister src1,
1533 const DwVfpRegister src2, 1577 const DwVfpRegister src2,
1534 const Condition cond) { 1578 const Condition cond) {
1535 // Dd = vsub(Dn, Dm) double precision floating point subtraction. 1579 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
1536 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn. 1580 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
1537 // Instruction details available in ARM DDI 0406A, A8-784. 1581 // Instruction details available in ARM DDI 0406A, A8-784.
1538 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | 1582 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
1539 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0) 1583 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
1540 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1584 ASSERT(CpuFeatures::IsEnabled(VFP3));
1541 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | 1585 emit_arm(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
1542 dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); 1586 dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
1543 } 1587 }
1544 1588
1545 1589
1546 void Assembler::vmul(const DwVfpRegister dst, 1590 void Assembler::vmul(const DwVfpRegister dst,
1547 const DwVfpRegister src1, 1591 const DwVfpRegister src1,
1548 const DwVfpRegister src2, 1592 const DwVfpRegister src2,
1549 const Condition cond) { 1593 const Condition cond) {
1550 // Dd = vmul(Dn, Dm) double precision floating point multiplication. 1594 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
1551 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn. 1595 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
1552 // Instruction details available in ARM DDI 0406A, A8-784. 1596 // Instruction details available in ARM DDI 0406A, A8-784.
1553 // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) | 1597 // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
1554 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) 1598 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
1555 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1599 ASSERT(CpuFeatures::IsEnabled(VFP3));
1556 emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 | 1600 emit_arm(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
1557 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 1601 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
1558 } 1602 }
1559 1603
1560 1604
1561 void Assembler::vdiv(const DwVfpRegister dst, 1605 void Assembler::vdiv(const DwVfpRegister dst,
1562 const DwVfpRegister src1, 1606 const DwVfpRegister src1,
1563 const DwVfpRegister src2, 1607 const DwVfpRegister src2,
1564 const Condition cond) { 1608 const Condition cond) {
1565 // Dd = vdiv(Dn, Dm) double precision floating point division. 1609 // Dd = vdiv(Dn, Dm) double precision floating point division.
1566 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn. 1610 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
1567 // Instruction details available in ARM DDI 0406A, A8-584. 1611 // Instruction details available in ARM DDI 0406A, A8-584.
1568 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | 1612 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
1569 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) 1613 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
1570 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1614 ASSERT(CpuFeatures::IsEnabled(VFP3));
1571 emit(cond | 0xE*B24 | B23 | src1.code()*B16 | 1615 emit_arm(cond | 0xE*B24 | B23 | src1.code()*B16 |
1572 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 1616 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
1573 } 1617 }
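Usage sketch (not part of this patch) tying the double-precision arithmetic emitters together. The helper is hypothetical and assumes the d-register declarations from this file; it emits code that computes d0 = ((d1 + d2) * d3) / d4.

  // Hypothetical helper; assumes Assembler plus the d0..d4 declarations.
  static void EvaluateExample(Assembler* masm) {
    masm->vadd(d0, d1, d2);  // d0 = d1 + d2
    masm->vmul(d0, d0, d3);  // d0 = (d1 + d2) * d3
    masm->vdiv(d0, d0, d4);  // d0 = ((d1 + d2) * d3) / d4
  }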
1574 1618
1575 1619
1576 void Assembler::vcmp(const DwVfpRegister src1, 1620 void Assembler::vcmp(const DwVfpRegister src1,
1577 const DwVfpRegister src2, 1621 const DwVfpRegister src2,
1578 const SBit s, 1622 const SBit s,
1579 const Condition cond) { 1623 const Condition cond) {
1580 // vcmp(Dd, Dm) double precision floating point comparison. 1624 // vcmp(Dd, Dm) double precision floating point comparison.
1581 // Instruction details available in ARM DDI 0406A, A8-570. 1625 // Instruction details available in ARM DDI 0406A, A8-570.
1582 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | 1626 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
1583 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0) 1627 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
1584 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1628 ASSERT(CpuFeatures::IsEnabled(VFP3));
1585 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 | 1629 emit_arm(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
1586 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); 1630 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
1587 } 1631 }
1588 1632
1589 1633
1590 void Assembler::vmrs(Register dst, Condition cond) { 1634 void Assembler::vmrs(Register dst, Condition cond) {
1591 // Instruction details available in ARM DDI 0406A, A8-652. 1635 // Instruction details available in ARM DDI 0406A, A8-652.
1592 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | 1636 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
1593 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) 1637 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
1594 ASSERT(CpuFeatures::IsEnabled(VFP3)); 1638 ASSERT(CpuFeatures::IsEnabled(VFP3));
1595 emit(cond | 0xE*B24 | 0xF*B20 | B16 | 1639 emit_arm(cond | 0xE*B24 | 0xF*B20 | B16 |
1596 dst.code()*B12 | 0xA*B8 | B4); 1640 dst.code()*B12 | 0xA*B8 | B4);
1597 } 1641 }
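Usage sketch (not part of this patch): vcmp leaves its result in the FPSCR flags, and vmrs with pc as the destination transfers those flags to the APSR so ordinary conditional branches can use them. The helper name and register choices below are assumptions for illustration.

  // Hypothetical helper; assumes Assembler, Label and the d0, d1, pc, eq declarations.
  static void BranchIfDoublesEqual(Assembler* masm, Label* on_equal) {
    masm->vcmp(d0, d1);      // FPSCR flags <- compare d0 with d1
    masm->vmrs(pc);          // Rt == pc moves the FPSCR flags into the APSR
    masm->b(on_equal, eq);   // normal conditional execution now sees the result
  }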
1598 1642
1599 1643
1600 // Pseudo instructions. 1644 // Pseudo instructions.
1601 void Assembler::lea(Register dst, 1645 void Assembler::lea(Register dst,
1602 const MemOperand& x, 1646 const MemOperand& x,
1603 SBit s, 1647 SBit s,
1604 Condition cond) { 1648 Condition cond) {
1605 int am = x.am_; 1649 int am = x.am_;
(...skipping 21 matching lines...)
1627 1671
1628 1672
1629 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { 1673 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
1630 uint32_t dummy1; 1674 uint32_t dummy1;
1631 uint32_t dummy2; 1675 uint32_t dummy2;
1632 return fits_shifter(imm32, &dummy1, &dummy2, NULL); 1676 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
1633 } 1677 }
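For readers unfamiliar with ARM addressing mode 1, a standalone sketch of the rule that fits_shifter tests: an immediate fits a data-processing instruction if it is an 8-bit value rotated right by an even amount. The real fits_shifter is cleverer (for example it may rewrite the instruction when the raw value does not fit), so the helper below is illustrative only.

  #include <cstdint>
  #include <cstdio>

  // Rotate left by n bits; n == 0 handled separately to avoid a 32-bit shift.
  static uint32_t RotateLeft(uint32_t v, int n) {
    return n == 0 ? v : (v << n) | (v >> (32 - n));
  }

  // Illustrative only: true if imm is some 8-bit value rotated right by an
  // even amount, the addressing-mode-1 immediate rule checked above.
  static bool FitsRotatedImm8(uint32_t imm) {
    for (int rot = 0; rot < 32; rot += 2) {
      if (RotateLeft(imm, rot) <= 0xFF) return true;  // undoing ROR #rot leaves 8 bits
    }
    return false;
  }

  int main() {
    printf("0xFF000000 fits: %d\n", FitsRotatedImm8(0xFF000000));  // 1
    printf("0x00000102 fits: %d\n", FitsRotatedImm8(0x102));       // 0
    return 0;
  }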
1634 1678
1635 1679
1636 void Assembler::BlockConstPoolFor(int instructions) { 1680 void Assembler::BlockConstPoolFor(int instructions) {
1637 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); 1681 BlockConstPoolBefore(pc_offset() + instructions * kInstrArmSize);
1638 } 1682 }
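Usage sketch (not part of this patch): callers block the pool across sequences whose length must stay fixed, such as code that is patched later. The helper and the two placeholder mov instructions below are assumptions for illustration.

  // Hypothetical helper; assumes Assembler, Operand and the r0/r1 declarations.
  static void EmitFixedSequence(Assembler* masm) {
    masm->BlockConstPoolFor(2);  // no constant pool inside the next two instructions
    masm->mov(r0, Operand(0));   // first instruction of the fixed sequence
    masm->mov(r1, Operand(1));   // second instruction; the pool cannot split them
  }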
1639 1683
1640 1684
1641 // Debugging. 1685 // Debugging.
1642 void Assembler::RecordJSReturn() { 1686 void Assembler::RecordJSReturn() {
1643 WriteRecordedPositions(); 1687 WriteRecordedPositions();
1644 CheckBuffer(); 1688 CheckBuffer();
1645 RecordRelocInfo(RelocInfo::JS_RETURN); 1689 RecordRelocInfo(RelocInfo::JS_RETURN);
1646 } 1690 }
1647 1691
(...skipping 97 matching lines...)
1745 // Adjust code for new modes. 1789 // Adjust code for new modes.
1746 ASSERT(RelocInfo::IsJSReturn(rmode) 1790 ASSERT(RelocInfo::IsJSReturn(rmode)
1747 || RelocInfo::IsComment(rmode) 1791 || RelocInfo::IsComment(rmode)
1748 || RelocInfo::IsPosition(rmode)); 1792 || RelocInfo::IsPosition(rmode));
1749 // These modes do not need an entry in the constant pool. 1793 // These modes do not need an entry in the constant pool.
1750 } else { 1794 } else {
1751 ASSERT(num_prinfo_ < kMaxNumPRInfo); 1795 ASSERT(num_prinfo_ < kMaxNumPRInfo);
1752 prinfo_[num_prinfo_++] = rinfo; 1796 prinfo_[num_prinfo_++] = rinfo;
1753 // Make sure the constant pool is not emitted in place of the next 1797 // Make sure the constant pool is not emitted in place of the next
1754 // instruction for which we just recorded relocation info. 1798 // instruction for which we just recorded relocation info.
1755 BlockConstPoolBefore(pc_offset() + kInstrSize); 1799 BlockConstPoolBefore(pc_offset() + kInstrArmSize);
1756 } 1800 }
1757 if (rinfo.rmode() != RelocInfo::NONE) { 1801 if (rinfo.rmode() != RelocInfo::NONE) {
1758 // Don't record external references unless the heap will be serialized. 1802 // Don't record external references unless the heap will be serialized.
1759 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { 1803 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
1760 #ifdef DEBUG 1804 #ifdef DEBUG
1761 if (!Serializer::enabled()) { 1805 if (!Serializer::enabled()) {
1762 Serializer::TooLateToEnableNow(); 1806 Serializer::TooLateToEnableNow();
1763 } 1807 }
1764 #endif 1808 #endif
1765 if (!Serializer::enabled() && !FLAG_debug_code) { 1809 if (!Serializer::enabled() && !FLAG_debug_code) {
(...skipping 40 matching lines...)
1806 if (pc_offset() < no_const_pool_before_) { 1850 if (pc_offset() < no_const_pool_before_) {
1807 // Emission is currently blocked; make sure we try again as soon as 1851 // Emission is currently blocked; make sure we try again as soon as
1808 // possible. 1852 // possible.
1809 next_buffer_check_ = no_const_pool_before_; 1853 next_buffer_check_ = no_const_pool_before_;
1810 1854
1811 // Something is wrong if emission is forced and blocked at the same time. 1855 // Something is wrong if emission is forced and blocked at the same time.
1812 ASSERT(!force_emit); 1856 ASSERT(!force_emit);
1813 return; 1857 return;
1814 } 1858 }
1815 1859
1816 int jump_instr = require_jump ? kInstrSize : 0; 1860 int jump_instr = require_jump ? kInstrArmSize : 0;
1817 1861
1818 // Check that the code buffer is large enough before emitting the constant 1862 // Check that the code buffer is large enough before emitting the constant
1819 // pool and relocation information (include the jump over the pool and the 1863 // pool and relocation information (include the jump over the pool and the
1820 // constant pool marker). 1864 // constant pool marker).
1821 int max_needed_space = 1865 int max_needed_space =
1822 jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize); 1866 jump_instr + kInstrArmSize + num_prinfo_*(kInstrArmSize + kMaxRelocSize);
1823 while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer(); 1867 while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
1824 1868
1825 // Block recursive calls to CheckConstPool. 1869 // Block recursive calls to CheckConstPool.
1826 BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize + 1870 BlockConstPoolBefore(pc_offset() + jump_instr + kInstrArmSize +
1827 num_prinfo_*kInstrSize); 1871 num_prinfo_*kInstrArmSize);
1828 // Don't bother to check for the emit calls below. 1872 // Don't bother to check for the emit calls below.
1829 next_buffer_check_ = no_const_pool_before_; 1873 next_buffer_check_ = no_const_pool_before_;
1830 1874
1831 // Emit jump over constant pool if necessary. 1875 // Emit jump over constant pool if necessary.
1832 Label after_pool; 1876 Label after_pool;
1833 if (require_jump) b(&after_pool); 1877 if (require_jump) b(&after_pool);
1834 1878
1835 RecordComment("[ Constant Pool"); 1879 RecordComment("[ Constant Pool");
1836 1880
1837 // Put down constant pool marker "Undefined instruction" as specified by 1881 // Put down constant pool marker "Undefined instruction" as specified by
1838 // A3.1 Instruction set encoding. 1882 // A3.1 Instruction set encoding.
1839 emit(0x03000000 | num_prinfo_); 1883 emit_int32(0x03000000 | num_prinfo_);
1840 1884
1841 // Emit constant pool entries. 1885 // Emit constant pool entries.
1842 for (int i = 0; i < num_prinfo_; i++) { 1886 for (int i = 0; i < num_prinfo_; i++) {
1843 RelocInfo& rinfo = prinfo_[i]; 1887 RelocInfo& rinfo = prinfo_[i];
1844 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && 1888 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
1845 rinfo.rmode() != RelocInfo::POSITION && 1889 rinfo.rmode() != RelocInfo::POSITION &&
1846 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); 1890 rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
1847 Instr instr = instr_at(rinfo.pc()); 1891 InstrArm instr = instr_arm_at(rinfo.pc());
1848 1892
1849 // Instruction to patch must be a ldr/str [pc, #offset]. 1893 // Instruction to patch must be a ldr/str [pc, #offset].
1850 // P and U set, B and W clear, Rn == pc, offset12 still 0. 1894 // P and U set, B and W clear, Rn == pc, offset12 still 0.
1851 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) == 1895 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
1852 (2*B25 | P | U | pc.code()*B16)); 1896 (2*B25 | P | U | pc.code()*B16));
1853 int delta = pc_ - rinfo.pc() - 8; 1897 int delta = pc_ - rinfo.pc() - 8;
1854 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 1898 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
1855 if (delta < 0) { 1899 if (delta < 0) {
1856 instr &= ~U; 1900 instr &= ~U;
1857 delta = -delta; 1901 delta = -delta;
1858 } 1902 }
1859 ASSERT(is_uint12(delta)); 1903 ASSERT(is_uint12(delta));
1860 instr_at_put(rinfo.pc(), instr + delta); 1904 instr_arm_at_put(rinfo.pc(), instr + delta);
1861 emit(rinfo.data()); 1905 emit_int32(rinfo.data());
1862 } 1906 }
1863 num_prinfo_ = 0; 1907 num_prinfo_ = 0;
1864 last_const_pool_end_ = pc_offset(); 1908 last_const_pool_end_ = pc_offset();
1865 1909
1866 RecordComment("]"); 1910 RecordComment("]");
1867 1911
1868 if (after_pool.is_linked()) { 1912 if (after_pool.is_linked()) {
1869 bind(&after_pool); 1913 bind(&after_pool);
1870 } 1914 }
1871 1915
1872 // Since a constant pool was just emitted, move the check offset forward by 1916 // Since a constant pool was just emitted, move the check offset forward by
1873 // the standard interval. 1917 // the standard interval.
1874 next_buffer_check_ = pc_offset() + kCheckConstInterval; 1918 next_buffer_check_ = pc_offset() + kCheckConstInterval;
1875 } 1919 }
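A standalone sketch of the patching arithmetic in CheckConstPool above: on ARM the pc reads as the address of the instruction plus 8, so the offset written into the pc-relative ldr is the distance from the ldr to its pool entry minus 8. The helper and the concrete offsets are illustrative only.

  #include <cassert>
  #include <cstdio>

  // Illustrative only: the offset patched into "ldr rX, [pc, #off]" above.
  static int PoolDelta(int entry_offset, int ldr_offset) {
    int delta = entry_offset - ldr_offset - 8;  // pc reads as ldr address + 8
    assert(delta >= -4);                        // same bound as the ASSERT above
    return delta;
  }

  int main() {
    // An ldr at buffer offset 0x100 whose pool entry lands at offset 0x140.
    printf("offset12 = %d\n", PoolDelta(0x140, 0x100));  // 56
    return 0;
  }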
1876 1920
1877 1921
1878 } } // namespace v8::internal 1922 } } // namespace v8::internal