| Index: src/arm/assembler-thumb2.cc
|
| ===================================================================
|
| --- src/arm/assembler-thumb2.cc (revision 4001)
|
| +++ src/arm/assembler-thumb2.cc (working copy)
|
| @@ -182,14 +182,14 @@
|
|
|
| void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
|
| // Patch the code at the current address with the supplied instructions.
|
| - Instr* pc = reinterpret_cast<Instr*>(pc_);
|
| - Instr* instr = reinterpret_cast<Instr*>(instructions);
|
| + InstrArm* pc = reinterpret_cast<InstrArm*>(pc_);
|
| + InstrArm* instr = reinterpret_cast<InstrArm*>(instructions);
|
| for (int i = 0; i < instruction_count; i++) {
|
| *(pc + i) = *(instr + i);
|
| }
|
|
|
| // Indicate that code has changed.
|
| - CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
|
| + CPU::FlushICache(pc_, instruction_count * Assembler::kInstrArmSize);
|
| }
|
|
|
|
|
| @@ -290,25 +290,6 @@
|
| P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
|
| I = 1 << 25, // immediate shifter operand (or not)
|
|
|
| - B4 = 1 << 4,
|
| - B5 = 1 << 5,
|
| - B6 = 1 << 6,
|
| - B7 = 1 << 7,
|
| - B8 = 1 << 8,
|
| - B9 = 1 << 9,
|
| - B12 = 1 << 12,
|
| - B16 = 1 << 16,
|
| - B18 = 1 << 18,
|
| - B19 = 1 << 19,
|
| - B20 = 1 << 20,
|
| - B21 = 1 << 21,
|
| - B22 = 1 << 22,
|
| - B23 = 1 << 23,
|
| - B24 = 1 << 24,
|
| - B25 = 1 << 25,
|
| - B26 = 1 << 26,
|
| - B27 = 1 << 27,
|
| -
|
| // Instruction bit masks.
|
| RdMask = 15 << 12, // in str instruction
|
| CondMask = 15 << 28,
|
| @@ -322,20 +303,20 @@
|
|
|
|
|
| // add(sp, sp, 4) instruction (aka Pop())
|
| -static const Instr kPopInstruction =
|
| +static const InstrArm kPopInstruction =
|
| al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
|
| // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
|
| // register r is not encoded.
|
| -static const Instr kPushRegPattern =
|
| +static const InstrArm kPushRegPattern =
|
| al | B26 | 4 | NegPreIndex | sp.code() * B16;
|
| // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
|
| // register r is not encoded.
|
| -static const Instr kPopRegPattern =
|
| +static const InstrArm kPopRegPattern =
|
| al | B26 | L | 4 | PostIndex | sp.code() * B16;
|
| // mov lr, pc
|
| -const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
|
| +const InstrArm kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
|
| // ldr pc, [pc, #XXX]
|
| -const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
|
| +const InstrArm kLdrPCPattern = al | B26 | L | pc.code() * B16;
|
|
|
| // Spare buffer.
|
| static const int kMinimalBufferSize = 4*KB;
|
| @@ -381,6 +362,7 @@
|
| current_position_ = RelocInfo::kNoPosition;
|
| written_statement_position_ = current_statement_position_;
|
| written_position_ = current_position_;
|
| + thumb_mode_ = false;
|
| }
|
|
|
|
|
| @@ -432,7 +414,7 @@
|
|
|
|
|
| int Assembler::target_at(int pos) {
|
| - Instr instr = instr_at(pos);
|
| + InstrArm instr = instr_arm_at(pos);
|
| if ((instr & ~Imm24Mask) == 0) {
|
| // Emitted label constant, not part of a branch.
|
| return instr - (Code::kHeaderSize - kHeapObjectTag);
|
| @@ -448,12 +430,12 @@
|
|
|
|
|
| void Assembler::target_at_put(int pos, int target_pos) {
|
| - Instr instr = instr_at(pos);
|
| + InstrArm instr = instr_arm_at(pos);
|
| if ((instr & ~Imm24Mask) == 0) {
|
| ASSERT(target_pos == kEndOfChain || target_pos >= 0);
|
| // Emitted label constant, not part of a branch.
|
| // Make label relative to Code* of generated Code object.
|
| - instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
|
| + instr_arm_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
|
| return;
|
| }
|
| int imm26 = target_pos - (pos + kPcLoadDelta);
|
| @@ -468,7 +450,7 @@
|
| }
|
| int imm24 = imm26 >> 2;
|
| ASSERT(is_int24(imm24));
|
| - instr_at_put(pos, instr | (imm24 & Imm24Mask));
|
| + instr_arm_at_put(pos, instr | (imm24 & Imm24Mask));
|
| }
|
|
|
|
|
| @@ -482,7 +464,7 @@
|
| PrintF("unbound label");
|
| while (l.is_linked()) {
|
| PrintF("@ %d ", l.pos());
|
| - Instr instr = instr_at(l.pos());
|
| + InstrArm instr = instr_arm_at(l.pos());
|
| if ((instr & ~Imm24Mask) == 0) {
|
| PrintF("value\n");
|
| } else {
|
| @@ -589,7 +571,7 @@
|
| static bool fits_shifter(uint32_t imm32,
|
| uint32_t* rotate_imm,
|
| uint32_t* immed_8,
|
| - Instr* instr) {
|
| + InstrArm* instr) {
|
| // imm32 must be unsigned.
|
| for (int rot = 0; rot < 16; rot++) {
|
| uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
|
| @@ -629,7 +611,78 @@
|
| }
|
|
|
|
|
| -void Assembler::addrmod1(Instr instr,
|
| +void Assembler::DataProcessing(Condition cond, Opcode op, SBit s,
|
| + Register rn,
|
| + Register rd,
|
| + const Operand& x) {
|
| + if (cond != al) {
|
| + addrmod1(cond | op * B21 | s, rn, rd, x);
|
| + } else if (!x.rm_.is_valid()) { // immediate data
|
| + addrmod1(cond | op * B21 | s, rn, rd, x);
|
| + } else if (!x.rs_.is_valid()) { // immediate shift
|
| + DataProcessingReg(op, s, rn, rd, x.rm_, x.shift_op_, x.shift_imm_);
|
| + // Go back immediately to avoid issues with bind() for now when
|
| + // the label points to an ARM instruction.
|
| + EnsureArmMode();
|
| + } else { // Register shift.
|
| + ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
|
| + addrmod1(cond | op * B21 | s, rn, rd, x);
|
| + }
|
| + if (rn.is(pc) || x.rm_.is(pc)) {
|
| + // Block constant pool emission for one instruction after reading pc.
|
| + BlockConstPoolBefore(pc_offset() + kInstrArmSize);
|
| + }
|
| +}
|
| +
|
| +void Assembler::DataProcessingImm(Opcode op, SBit s, Register rn, Register rd,
|
| + int imm) {
|
| + InstrArm i0 = 0xf000; // 1111 0iio ooon nnnn
|
| + switch (op) {
|
| + case ADD:
|
| + i0 |= 0x8 * B5; // 1000
|
| + break;
|
| + case AND:
|
| + i0 |= 0x0 * B5; // 0000
|
| + break;
|
| + case SUB:
|
| +      i0 |= 0xd * B5; // 1101
|
| + break;
|
| + default:
|
| + UNIMPLEMENTED();
|
| + }
|
| + if (s) {
|
| + i0 |= B4;
|
| + }
|
| + i0 |= (imm >> 11) * B10 | rn.code();
|
| + emit_thumb(i0);
|
| + emit_thumb(((imm >> 8) & 7) * B12 | rd.code() * B8 | (imm & 0xff));
|
| +}
|
| +
|
| +void Assembler::DataProcessingReg(Opcode op, SBit s, Register rn, Register rd,
|
| + Register rm, ShiftOp shiftOp, int shiftBy) {
|
| + InstrArm i0 = B15 | B14 | B13 | B11 | B9; // 1110 101o ooon nnnn
|
| + switch (op) {
|
| + case ADD:
|
| + i0 |= 0x8 * B5; // 1000
|
| + break;
|
| + case AND:
|
| + i0 |= 0x0 * B5; // 0000
|
| + break;
|
| + default:
|
| + UNIMPLEMENTED();
|
| + }
|
| +
|
| + if (s) {
|
| + i0 |= B4;
|
| + }
|
| + i0 |= rn.code();
|
| + emit_thumb(i0);
|
| + InstrArm i1 = ((shiftBy >> 2) & 7) * B12 | rd.code() * B8 |
|
| + (shiftBy & 3) * B6 | shiftOp * B4 | rm.code();
|
| + emit_thumb(i1);
|
| +}
|
| +
|
| +void Assembler::addrmod1(InstrArm instr,
|
| Register rn,
|
| Register rd,
|
| const Operand& x) {
|
| @@ -659,20 +712,20 @@
|
| instr |= I | rotate_imm*B8 | immed_8;
|
| } else if (!x.rs_.is_valid()) {
|
| // Immediate shift.
|
| - instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
|
| + instr |= x.shift_imm_*B7 | x.shift_op_*B5 | x.rm_.code();
|
| } else {
|
| // Register shift.
|
| ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
|
| - instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
|
| + instr |= x.rs_.code()*B8 | x.shift_op_*B5 | B4 | x.rm_.code();
|
| }
|
| - emit(instr | rn.code()*B16 | rd.code()*B12);
|
| + emit_arm(instr | rn.code()*B16 | rd.code()*B12);
|
| if (rn.is(pc) || x.rm_.is(pc))
|
| // Block constant pool emission for one instruction after reading pc.
|
| - BlockConstPoolBefore(pc_offset() + kInstrSize);
|
| + BlockConstPoolBefore(pc_offset() + kInstrArmSize);
|
| }
|
|
|
|
|
| -void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
|
| +void Assembler::addrmod2(InstrArm instr, Register rd, const MemOperand& x) {
|
| ASSERT((instr & ~(CondMask | B | L)) == B26);
|
| int am = x.am_;
|
| if (!x.rm_.is_valid()) {
|
| @@ -701,11 +754,11 @@
|
| instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
|
| }
|
| ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
|
| - emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
|
| + emit_arm(instr | am | x.rn_.code()*B16 | rd.code()*B12);
|
| }
|
|
|
|
|
| -void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
|
| +void Assembler::addrmod3(InstrArm instr, Register rd, const MemOperand& x) {
|
| ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
|
| ASSERT(x.rn_.is_valid());
|
| int am = x.am_;
|
| @@ -741,19 +794,19 @@
|
| instr |= x.rm_.code();
|
| }
|
| ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
|
| - emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
|
| + emit_arm(instr | am | x.rn_.code()*B16 | rd.code()*B12);
|
| }
|
|
|
|
|
| -void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
|
| +void Assembler::addrmod4(InstrArm instr, Register rn, RegList rl) {
|
| ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
|
| ASSERT(rl != 0);
|
| ASSERT(!rn.is(pc));
|
| - emit(instr | rn.code()*B16 | rl);
|
| + emit_arm(instr | rn.code()*B16 | rl);
|
| }
|
|
|
|
|
| -void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
|
| +void Assembler::addrmod5(InstrArm instr, CRegister crd, const MemOperand& x) {
|
| // Unindexed addressing is not encoded by this function.
|
| ASSERT_EQ((B27 | B26),
|
| (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
|
| @@ -774,7 +827,7 @@
|
| am |= W;
|
|
|
| ASSERT(offset_8 >= 0); // no masking needed
|
| - emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
|
| + emit_arm(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
|
| }
|
|
|
|
|
| @@ -793,7 +846,7 @@
|
|
|
| // Block the emission of the constant pool, since the branch instruction must
|
| // be emitted at the pc offset recorded by the label.
|
| - BlockConstPoolBefore(pc_offset() + kInstrSize);
|
| + BlockConstPoolBefore(pc_offset() + kInstrArmSize);
|
| return target_pos - (pc_offset() + kPcLoadDelta);
|
| }
|
|
|
| @@ -809,7 +862,8 @@
|
| target_pos = kEndOfChain;
|
| }
|
| L->link_to(at_offset);
|
| - instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
|
| + instr_arm_at_put(at_offset, target_pos +
|
| + (Code::kHeaderSize - kHeapObjectTag));
|
| }
|
| }
|
|
|
| @@ -819,7 +873,7 @@
|
| ASSERT((branch_offset & 3) == 0);
|
| int imm24 = branch_offset >> 2;
|
| ASSERT(is_int24(imm24));
|
| - emit(cond | B27 | B25 | (imm24 & Imm24Mask));
|
| + emit_arm(cond | B27 | B25 | (imm24 & Imm24Mask));
|
|
|
| if (cond == al)
|
| // Dead code is a good location to emit the constant pool.
|
| @@ -831,7 +885,7 @@
|
| ASSERT((branch_offset & 3) == 0);
|
| int imm24 = branch_offset >> 2;
|
| ASSERT(is_int24(imm24));
|
| - emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
|
| + emit_arm(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
|
| }
|
|
|
|
|
| @@ -841,21 +895,21 @@
|
| int h = ((branch_offset & 2) >> 1)*B24;
|
| int imm24 = branch_offset >> 2;
|
| ASSERT(is_int24(imm24));
|
| - emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
|
| + emit_arm(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
|
| }
|
|
|
|
|
| void Assembler::blx(Register target, Condition cond) { // v5 and above
|
| WriteRecordedPositions();
|
| ASSERT(!target.is(pc));
|
| - emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
|
| + emit_arm(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
|
| }
|
|
|
|
|
| void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
|
| WriteRecordedPositions();
|
| ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
|
| - emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
|
| + emit_arm(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
|
| }
|
|
|
|
|
| @@ -870,14 +924,14 @@
|
| ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
|
| ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
|
| ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
|
| - emit(cond | 0x3F*B21 | src3.imm32_*B16 |
|
| + emit_arm(cond | 0x3F*B21 | src3.imm32_*B16 |
|
| dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
|
| }
|
|
|
|
|
| void Assembler::and_(Register dst, Register src1, const Operand& src2,
|
| SBit s, Condition cond) {
|
| - addrmod1(cond | 0*B21 | s, src1, dst, src2);
|
| + DataProcessing(cond, AND, s, src1, dst, src2);
|
| }
|
|
|
|
|
| @@ -901,24 +955,11 @@
|
|
|
| void Assembler::add(Register dst, Register src1, const Operand& src2,
|
| SBit s, Condition cond) {
|
| - addrmod1(cond | 4*B21 | s, src1, dst, src2);
|
| -
|
| - // Eliminate pattern: push(r), pop()
|
| + DataProcessing(cond, ADD, s, src1, dst, src2);
|
| + // TODO(haustein): Eliminate pattern: push(r), pop()
|
| // str(src, MemOperand(sp, 4, NegPreIndex), al);
|
| // add(sp, sp, Operand(kPointerSize));
|
| - // Both instructions can be eliminated.
|
| - int pattern_size = 2 * kInstrSize;
|
| - if (FLAG_push_pop_elimination &&
|
| - last_bound_pos_ <= (pc_offset() - pattern_size) &&
|
| - reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
|
| - // Pattern.
|
| - instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
|
| - (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
|
| - pc_ -= 2 * kInstrSize;
|
| - if (FLAG_print_push_pop_elimination) {
|
| - PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
|
| - }
|
| - }
|
| + // Both instructions can be eliminated, as in assembler-arm.cc
|
| }
|
|
|
|
|
| @@ -989,7 +1030,7 @@
|
| void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
|
| SBit s, Condition cond) {
|
| ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
|
| - emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
|
| + emit_arm(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
|
| src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
| @@ -998,7 +1039,7 @@
|
| SBit s, Condition cond) {
|
| ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
|
| // dst goes in bits 16-19 for this instruction!
|
| - emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
|
| + emit_arm(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
|
|
| @@ -1010,7 +1051,7 @@
|
| Condition cond) {
|
| ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
|
| ASSERT(!dstL.is(dstH));
|
| - emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
|
| + emit_arm(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
|
| src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
| @@ -1023,7 +1064,7 @@
|
| Condition cond) {
|
| ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
|
| ASSERT(!dstL.is(dstH));
|
| - emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
|
| + emit_arm(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
|
| src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
| @@ -1036,7 +1077,7 @@
|
| Condition cond) {
|
| ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
|
| ASSERT(!dstL.is(dstH));
|
| - emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
|
| + emit_arm(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
|
| src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
| @@ -1049,7 +1090,7 @@
|
| Condition cond) {
|
| ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
|
| ASSERT(!dstL.is(dstH));
|
| - emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
|
| + emit_arm(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
|
| src2.code()*B8 | B7 | B4 | src1.code());
|
| }
|
|
|
| @@ -1058,7 +1099,7 @@
|
| void Assembler::clz(Register dst, Register src, Condition cond) {
|
| // v5 and above.
|
| ASSERT(!dst.is(pc) && !src.is(pc));
|
| - emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
|
| + emit_arm(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
|
| 15*B8 | B4 | src.code());
|
| }
|
|
|
| @@ -1066,14 +1107,14 @@
|
| // Status register access instructions.
|
| void Assembler::mrs(Register dst, SRegister s, Condition cond) {
|
| ASSERT(!dst.is(pc));
|
| - emit(cond | B24 | s | 15*B16 | dst.code()*B12);
|
| + emit_arm(cond | B24 | s | 15*B16 | dst.code()*B12);
|
| }
|
|
|
|
|
| void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
|
| Condition cond) {
|
| ASSERT(fields >= B16 && fields < B20); // at least one field set
|
| - Instr instr;
|
| + InstrArm instr;
|
| if (!src.rm_.is_valid()) {
|
| // Immediate.
|
| uint32_t rotate_imm;
|
| @@ -1091,7 +1132,7 @@
|
| ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
|
| instr = src.rm_.code();
|
| }
|
| - emit(cond | instr | B24 | B21 | fields | 15*B12);
|
| + emit_arm(cond | instr | B24 | B21 | fields | 15*B12);
|
| }
|
|
|
|
|
| @@ -1106,14 +1147,16 @@
|
| // str(r, MemOperand(sp, 4, NegPreIndex), al)
|
| // ldr(r, MemOperand(sp, 4, PostIndex), al)
|
| // Both instructions can be eliminated.
|
| - int pattern_size = 2 * kInstrSize;
|
| + int pattern_size = 2 * kInstrArmSize;
|
| if (FLAG_push_pop_elimination &&
|
| last_bound_pos_ <= (pc_offset() - pattern_size) &&
|
| reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
|
| // Pattern.
|
| - instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
|
| - instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
|
| - pc_ -= 2 * kInstrSize;
|
| + instr_arm_at(pc_ - 1 * kInstrArmSize) ==
|
| + (kPopRegPattern | dst.code() * B12) &&
|
| + instr_arm_at(pc_ - 2 * kInstrArmSize) ==
|
| + (kPushRegPattern | dst.code() * B12)) {
|
| + pc_ -= 2 * kInstrArmSize;
|
| if (FLAG_print_push_pop_elimination) {
|
| PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
|
| }
|
| @@ -1127,15 +1170,16 @@
|
| // Eliminate pattern: pop(), push(r)
|
| // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
|
| // -> str r, [sp, 0], al
|
| - int pattern_size = 2 * kInstrSize;
|
| + int pattern_size = 2 * kInstrArmSize;
|
| if (FLAG_push_pop_elimination &&
|
| last_bound_pos_ <= (pc_offset() - pattern_size) &&
|
| reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
|
| // Pattern.
|
| - instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
|
| - instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
|
| - pc_ -= 2 * kInstrSize;
|
| - emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
|
| + instr_arm_at(pc_ - 1 * kInstrArmSize) ==
|
| + (kPushRegPattern | src.code() * B12) &&
|
| + instr_arm_at(pc_ - 2 * kInstrArmSize) == kPopInstruction) {
|
| + pc_ -= 2 * kInstrArmSize;
|
| + emit_arm(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
|
| if (FLAG_print_push_pop_elimination) {
|
| PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
|
| }
|
| @@ -1190,7 +1234,7 @@
|
| // recognize this case by checking if the emission of the pool was blocked
|
| // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
|
| // the case, we emit a jump over the pool.
|
| - CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
|
| + CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrArmSize);
|
| }
|
| }
|
|
|
| @@ -1207,7 +1251,7 @@
|
| void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
|
| ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
|
| ASSERT(!dst.is(base) && !src.is(base));
|
| - emit(cond | P | base.code()*B16 | dst.code()*B12 |
|
| + emit_arm(cond | P | base.code()*B16 | dst.code()*B12 |
|
| B7 | B4 | src.code());
|
| }
|
|
|
| @@ -1218,7 +1262,7 @@
|
| Condition cond) {
|
| ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
|
| ASSERT(!dst.is(base) && !src.is(base));
|
| - emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
|
| + emit_arm(cond | P | B | base.code()*B16 | dst.code()*B12 |
|
| B7 | B4 | src.code());
|
| }
|
|
|
| @@ -1227,7 +1271,7 @@
|
| void Assembler::stop(const char* msg) {
|
| #if !defined(__arm__)
|
| // The simulator handles these special instructions and stops execution.
|
| - emit(15 << 28 | ((intptr_t) msg));
|
| + emit_arm(15 << 28 | ((intptr_t) msg));
|
| #else
|
| // Just issue a simple break instruction for now. Alternatively we could use
|
| // the swi(0x9f0001) instruction on Linux.
|
| @@ -1238,13 +1282,13 @@
|
|
|
| void Assembler::bkpt(uint32_t imm16) { // v5 and above
|
| ASSERT(is_uint16(imm16));
|
| - emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
|
| + emit_arm(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
|
| }
|
|
|
|
|
| void Assembler::swi(uint32_t imm24, Condition cond) {
|
| ASSERT(is_uint24(imm24));
|
| - emit(cond | 15*B24 | imm24);
|
| + emit_arm(cond | 15*B24 | imm24);
|
| }
|
|
|
|
|
| @@ -1257,7 +1301,7 @@
|
| int opcode_2,
|
| Condition cond) {
|
| ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
|
| - emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
|
| + emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
|
| crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
|
| }
|
|
|
| @@ -1280,7 +1324,7 @@
|
| int opcode_2,
|
| Condition cond) {
|
| ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
|
| - emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
|
| + emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
|
| rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
|
| }
|
|
|
| @@ -1303,7 +1347,7 @@
|
| int opcode_2,
|
| Condition cond) {
|
| ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
|
| - emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
|
| + emit_arm(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
|
| rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
|
| }
|
|
|
| @@ -1335,7 +1379,7 @@
|
| Condition cond) {
|
| // Unindexed addressing.
|
| ASSERT(is_uint8(option));
|
| - emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
|
| + emit_arm(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
|
| coproc*B8 | (option & 255));
|
| }
|
|
|
| @@ -1374,7 +1418,7 @@
|
| Condition cond) {
|
| // Unindexed addressing.
|
| ASSERT(is_uint8(option));
|
| - emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
|
| + emit_arm(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
|
| coproc*B8 | (option & 255));
|
| }
|
|
|
| @@ -1407,7 +1451,7 @@
|
| // Vdst(15-12) | 1011(11-8) | offset
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(offset % 4 == 0);
|
| - emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
|
| + emit_arm(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
|
| 0xB*B8 | ((offset / 4) & 255));
|
| }
|
|
|
| @@ -1422,7 +1466,7 @@
|
| // Vsrc(15-12) | 1011(11-8) | (offset/4)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(offset % 4 == 0);
|
| - emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
|
| + emit_arm(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
|
| 0xB*B8 | ((offset / 4) & 255));
|
| }
|
|
|
| @@ -1437,7 +1481,7 @@
|
| // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(!src1.is(pc) && !src2.is(pc));
|
| - emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
|
| + emit_arm(cond | 0xC*B24 | B22 | src2.code()*B16 |
|
| src1.code()*B12 | 0xB*B8 | B4 | dst.code());
|
| }
|
|
|
| @@ -1452,7 +1496,7 @@
|
| // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(!dst1.is(pc) && !dst2.is(pc));
|
| - emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
|
| + emit_arm(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
|
| dst1.code()*B12 | 0xB*B8 | B4 | src.code());
|
| }
|
|
|
| @@ -1466,7 +1510,7 @@
|
| // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(!src.is(pc));
|
| - emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
|
| + emit_arm(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
|
| src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
|
| }
|
|
|
| @@ -1480,7 +1524,7 @@
|
| // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| ASSERT(!dst.is(pc));
|
| - emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
|
| + emit_arm(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
|
| dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
|
| }
|
|
|
| @@ -1493,7 +1537,7 @@
|
| // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
|
| + emit_arm(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
|
| dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
|
| (0x1 & src.code())*B5 | (src.code() >> 1));
|
| }
|
| @@ -1507,7 +1551,7 @@
|
| // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
|
| + emit_arm(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
|
| 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
|
| 0x5*B9 | B8 | B7 | B6 | src.code());
|
| }
|
| @@ -1523,7 +1567,7 @@
|
| // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
|
| + emit_arm(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
|
| dst.code()*B12 | 0x5*B9 | B8 | src2.code());
|
| }
|
|
|
| @@ -1538,7 +1582,7 @@
|
| // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
|
| + emit_arm(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
|
| dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
|
| }
|
|
|
| @@ -1553,7 +1597,7 @@
|
| // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
|
| + emit_arm(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
|
| dst.code()*B12 | 0x5*B9 | B8 | src2.code());
|
| }
|
|
|
| @@ -1568,7 +1612,7 @@
|
| // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
|
| + emit_arm(cond | 0xE*B24 | B23 | src1.code()*B16 |
|
| dst.code()*B12 | 0x5*B9 | B8 | src2.code());
|
| }
|
|
|
| @@ -1582,7 +1626,7 @@
|
| // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
|
| // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
|
| + emit_arm(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
|
| src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
|
| }
|
|
|
| @@ -1592,7 +1636,7 @@
|
| // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
|
| // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
|
| ASSERT(CpuFeatures::IsEnabled(VFP3));
|
| - emit(cond | 0xE*B24 | 0xF*B20 | B16 |
|
| + emit_arm(cond | 0xE*B24 | 0xF*B20 | B16 |
|
| dst.code()*B12 | 0xA*B8 | B4);
|
| }
|
|
|
| @@ -1634,7 +1678,7 @@
|
|
|
|
|
| void Assembler::BlockConstPoolFor(int instructions) {
|
| - BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
|
| + BlockConstPoolBefore(pc_offset() + instructions * kInstrArmSize);
|
| }
|
|
|
|
|
| @@ -1752,7 +1796,7 @@
|
| prinfo_[num_prinfo_++] = rinfo;
|
| // Make sure the constant pool is not emitted in place of the next
|
| // instruction for which we just recorded relocation info.
|
| - BlockConstPoolBefore(pc_offset() + kInstrSize);
|
| + BlockConstPoolBefore(pc_offset() + kInstrArmSize);
|
| }
|
| if (rinfo.rmode() != RelocInfo::NONE) {
|
| // Don't record external references unless the heap will be serialized.
|
| @@ -1813,18 +1857,18 @@
|
| return;
|
| }
|
|
|
| - int jump_instr = require_jump ? kInstrSize : 0;
|
| + int jump_instr = require_jump ? kInstrArmSize : 0;
|
|
|
| // Check that the code buffer is large enough before emitting the constant
|
| // pool and relocation information (include the jump over the pool and the
|
| // constant pool marker).
|
| int max_needed_space =
|
| - jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
|
| + jump_instr + kInstrArmSize + num_prinfo_*(kInstrArmSize + kMaxRelocSize);
|
| while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
|
|
|
| // Block recursive calls to CheckConstPool.
|
| - BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
|
| - num_prinfo_*kInstrSize);
|
| + BlockConstPoolBefore(pc_offset() + jump_instr + kInstrArmSize +
|
| + num_prinfo_*kInstrArmSize);
|
| // Don't bother to check for the emit calls below.
|
| next_buffer_check_ = no_const_pool_before_;
|
|
|
| @@ -1836,7 +1880,7 @@
|
|
|
| // Put down constant pool marker "Undefined instruction" as specified by
|
| // A3.1 Instruction set encoding.
|
| - emit(0x03000000 | num_prinfo_);
|
| + emit_int32(0x03000000 | num_prinfo_);
|
|
|
| // Emit constant pool entries.
|
| for (int i = 0; i < num_prinfo_; i++) {
|
| @@ -1844,7 +1888,7 @@
|
| ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
|
| rinfo.rmode() != RelocInfo::POSITION &&
|
| rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
|
| - Instr instr = instr_at(rinfo.pc());
|
| + InstrArm instr = instr_arm_at(rinfo.pc());
|
|
|
| // Instruction to patch must be a ldr/str [pc, #offset].
|
| // P and U set, B and W clear, Rn == pc, offset12 still 0.
|
| @@ -1857,8 +1901,8 @@
|
| delta = -delta;
|
| }
|
| ASSERT(is_uint12(delta));
|
| - instr_at_put(rinfo.pc(), instr + delta);
|
| - emit(rinfo.data());
|
| + instr_arm_at_put(rinfo.pc(), instr + delta);
|
| + emit_int32(rinfo.data());
|
| }
|
| num_prinfo_ = 0;
|
| last_const_pool_end_ = pc_offset();
|
|
|