Chromium Code Reviews

Unified Diff: src/arm/assembler-arm.cc

Issue 24793002: Thumb2 Backend: Make arithmetic instructions set condition codes by default
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 3 months ago
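This diff assumes a new three-valued SBitMode type and an sbit_from_mode() helper, both defined in src/arm/assembler-arm.h (the previous file in this change, not shown here). Below is a minimal sketch of what those definitions likely look like, inferred purely from how this file uses them; the exact names follow the diff, but the values are assumptions:

// Sketch only: inferred from usage in this diff, not copied from the header.
// The old two-valued S-bit encoding presumably keeps bit 20 (the ARM
// data-processing S bit) but is renamed to *Bit to free up the old names.
enum SBit {
  LeaveCCBit = 0 << 20,  // S bit clear: condition flags untouched.
  SetCCBit   = 1 << 20   // S bit set: condition flags updated.
};

// The caller-facing mode adds a third state so the Thumb2 backend can pick
// a cheaper 16-bit flag-setting encoding when the caller does not care.
enum SBitMode {
  LeaveCC,    // Flags must be preserved.
  SetCC,      // Flags must be updated.
  DontCareCC  // Backend chooses whichever encoding is shortest.
};

// Collapse the mode back to an ARM-mode S bit. This matches the explicit
// `smode == SetCC ? SetCCBit : LeaveCCBit` in the smlal/smull/umlal/umull
// hunks below: DontCareCC conservatively leaves flags alone in ARM mode.
static inline SBit sbit_from_mode(SBitMode smode) {
  return smode == SetCC ? SetCCBit : LeaveCCBit;
}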
Index: src/arm/assembler-arm.cc
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index c82c8728708ab8fdd26bfa96adee7a7051e85f6c..ff3f13edc8c13f607fd35c6d87bd89842aaf39f9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -928,6 +928,7 @@ void Assembler::next(Label* L) {
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
+ SBitMode* smode,
Instr* instr) {
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
@@ -942,21 +943,21 @@ static bool fits_shifter(uint32_t imm32,
// immediate fits, change the opcode.
if (instr != NULL) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, smode, NULL)) {
*instr ^= kMovMvnFlip;
return true;
- } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- if (imm32 < 0x10000) {
- *instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
- *rotate_imm = *immed_8 = 0; // Not used for movw.
- return true;
- }
- }
+ } else if (CpuFeatures::IsSupported(ARMv7) &&
+ *smode != SetCC &&
+ imm32 < 0x10000) {
+ *smode = LeaveCC; // might have been DontCareCC
+ *instr ^= kMovwLeaveCCFlip;
+ *instr |= EncodeMovwImmediate(imm32);
+ *rotate_imm = *immed_8 = 0; // Not used for movw.
+ return true;
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm,
+ immed_8, NULL, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -964,13 +965,14 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm,
+ immed_8, smode, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == AND ||
alu_insn == BIC) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, smode, NULL)) {
*instr ^= kAndBicFlip;
return true;
}
@@ -1016,8 +1018,9 @@ bool Operand::is_single_instruction(const Assembler* assembler,
Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
+ SBitMode smode = (instr & S) != 0 ? SetCC : LeaveCC;
if (must_output_reloc_info(assembler) ||
- !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
+ !fits_shifter(imm32_, &dummy1, &dummy2, &smode, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
@@ -1039,10 +1042,10 @@ bool Operand::is_single_instruction(const Assembler* assembler,
void Assembler::move_32_bit_immediate_thumb(Register rd,
- SBit s,
+ SBitMode smode,
const Operand& x,
Condition cond) {
- if (rd.code() != pc.code() && s == LeaveCC) {
+ if (rd.code() != pc.code() && smode != SetCC) {
if (use_movw_movt(x, this)) {
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
@@ -1066,9 +1069,8 @@ void Assembler::move_32_bit_immediate_thumb(Register rd,
void Assembler::move_32_bit_immediate(Condition cond,
Register rd,
- SBit s,
const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
+ if (rd.code() != pc.code()) {
if (use_movw_movt(x, this)) {
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
@@ -1088,43 +1090,39 @@ void Assembler::move_32_bit_immediate(Condition cond,
void Assembler::addrmod1(Instr instr,
+ SBitMode smode,
Register rn,
Register rd,
const Operand& x) {
CheckBuffer();
- ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
+ ASSERT((instr & ~(kCondMask | kOpCodeMask)) == 0);
if (!x.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_output_reloc_info(this) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
+ if (!x.must_output_reloc_info(this) &&
+ fits_shifter(x.imm32_, &rotate_imm, &immed_8, &smode, &instr)) {
+ // The immediate operand can be encoded directly in the shifter.
+ instr |= I | rotate_imm * B8 | immed_8;
+ } else {
+ // The immediate operand cannot be encoded as a shifter operand. We will
+ // need to generate extra instructions.
+ CHECK(!rn.is(ip));
Condition cond = Instruction::ConditionField(instr);
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
+ if ((instr & kMovMvnMask) == kMovMvnPattern && smode != SetCC) {
+ // If this is a move that doesn't set flags, we can just load directly
+ // into rd using movw/movt or a load from the constant pool.
+ move_32_bit_immediate(cond, rd, x);
+ return;
} else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
- addrmod1(instr, rn, rd, Operand(ip));
+ // Otherwise, we move the value into ip. This could be encoded as
+ // mvn, movw/movt, or a constant pool load: whatever is most efficient.
+ SBitMode mov_smode = smode == LeaveCC ? LeaveCC : DontCareCC;
+ mov(ip, x, mov_smode, cond);
+ addrmod1(instr, smode, rn, rd, Operand(ip));
+ return;
}
- return;
}
- instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) {
// Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
@@ -1133,7 +1131,8 @@ void Assembler::addrmod1(Instr instr,
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
- emit(instr | rn.code()*B16 | rd.code()*B12);
+ SBit s = sbit_from_mode(smode);
+ emit(instr | s | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1);
@@ -1363,78 +1362,78 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
// Data-processing instructions.
void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- and_thumb(dst, src1, src2, s, cond);
+ and_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | AND | s, src1, dst, src2);
+ addrmod1(cond | AND, smode, src1, dst, src2);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- eor_thumb(dst, src1, src2, s, cond);
+ eor_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | EOR | s, src1, dst, src2);
+ addrmod1(cond | EOR, smode, src1, dst, src2);
}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- sub_thumb(dst, src1, src2, s, cond);
+ sub_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | SUB | s, src1, dst, src2);
+ addrmod1(cond | SUB, smode, src1, dst, src2);
}
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- rsb_thumb(dst, src1, src2, s, cond);
+ rsb_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | RSB | s, src1, dst, src2);
+ addrmod1(cond | RSB, smode, src1, dst, src2);
}
void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- add_thumb(dst, src1, src2, s, cond);
+ add_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | ADD | s, src1, dst, src2);
+ addrmod1(cond | ADD, smode, src1, dst, src2);
}
void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- adc_thumb(dst, src1, src2, s, cond);
+ adc_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | ADC | s, src1, dst, src2);
+ addrmod1(cond | ADC, smode, src1, dst, src2);
}
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- sbc_thumb(dst, src1, src2, s, cond);
+ sbc_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | SBC | s, src1, dst, src2);
+ addrmod1(cond | SBC, smode, src1, dst, src2);
}
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSC | s, src1, dst, src2);
+ SBitMode smode, Condition cond) {
+ addrmod1(cond | RSC, smode, src1, dst, src2);
}
@@ -1443,7 +1442,7 @@ void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
tst_thumb(src1, src2, cond);
return;
}
- addrmod1(cond | TST | S, src1, r0, src2);
+ addrmod1(cond | TST, SetCC, src1, r0, src2);
}
@@ -1452,7 +1451,7 @@ void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
teq_thumb(src1, src2, cond);
return;
}
- addrmod1(cond | TEQ | S, src1, r0, src2);
+ addrmod1(cond | TEQ, SetCC, src1, r0, src2);
}
@@ -1461,7 +1460,7 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
cmp_thumb(src1, src2, cond);
return;
}
- addrmod1(cond | CMP | S, src1, r0, src2);
+ addrmod1(cond | CMP, SetCC, src1, r0, src2);
}
@@ -1477,38 +1476,50 @@ void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
cmn_thumb(src1, src2, cond);
return;
}
- addrmod1(cond | CMN | S, src1, r0, src2);
+ addrmod1(cond | CMN, SetCC, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- orr_thumb(dst, src1, src2, s, cond);
+ orr_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | ORR | s, src1, dst, src2);
+ addrmod1(cond | ORR, smode, src1, dst, src2);
}
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+void Assembler::mov(Register dst,
+ const Operand& src,
+ SBitMode smode,
+ Condition cond) {
if (dst.is(pc)) {
positions_recorder()->WriteRecordedPositions();
}
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
- ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
+ ASSERT(!(src.is_reg() && src.rm().is(dst) && smode != SetCC && cond == al));
if (is_thumb_mode()) {
- mov_thumb(dst, src, s, cond);
+ mov_thumb(dst, src, smode, cond);
return;
}
- addrmod1(cond | MOV | s, r0, dst, src);
+ if (dst.code() == 15 && smode == DontCareCC) {
+ smode = LeaveCC;
+ }
+ addrmod1(cond | MOV, smode, r0, dst, src);
}
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
+ if (is_thumb_mode()) {
+ ASSERT(cond == al);
+ mov_imm_t3(reg, Operand(immediate), LeaveCCBit, al);
+ return;
+ }
+
// May use movw if supported, but on unsupported platforms will try to use
// equivalent rotated immed_8 value and other tricks before falling back to a
// constant pool load.
@@ -1526,32 +1537,34 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- bic_thumb(dst, src1, src2, s, cond);
+ bic_thumb(dst, src1, src2, smode, cond);
return;
}
- addrmod1(cond | BIC | s, src1, dst, src2);
+ addrmod1(cond | BIC, smode, src1, dst, src2);
}
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+void Assembler::mvn(Register dst, const Operand& src,
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- mvn_thumb(dst, src, s, cond);
+ mvn_thumb(dst, src, smode, cond);
return;
}
- addrmod1(cond | MVN | s, r0, dst, src);
+ addrmod1(cond | MVN, smode, r0, dst, src);
}
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
if (is_thumb_mode()) {
- mla_thumb(dst, src1, src2, srcA, s, cond);
+ mla_thumb(dst, src1, src2, srcA, smode, cond);
return;
}
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ SBit s = sbit_from_mode(smode);
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1583,12 +1596,13 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
+ SBitMode smode, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
if (is_thumb_mode()) {
- mul_thumb(dst, src1, src2, s, cond);
+ mul_thumb(dst, src1, src2, smode, cond);
return;
}
+ SBit s = sbit_from_mode(smode);
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1598,14 +1612,15 @@ void Assembler::smlal(Register dstL,
Register dstH,
Register src1,
Register src2,
- SBit s,
+ SBitMode smode,
Condition cond) {
if (is_thumb_mode()) {
- smlal_thumb(dstL, dstH, src1, src2, s, cond);
+ smlal_thumb(dstL, dstH, src1, src2, smode, cond);
return;
}
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH));
+ SBit s = smode == SetCC ? SetCCBit : LeaveCCBit;
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1615,14 +1630,15 @@ void Assembler::smull(Register dstL,
Register dstH,
Register src1,
Register src2,
- SBit s,
+ SBitMode smode,
Condition cond) {
if (is_thumb_mode()) {
- smull_thumb(dstL, dstH, src1, src2, s, cond);
+ smull_thumb(dstL, dstH, src1, src2, smode, cond);
return;
}
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH));
+ SBit s = smode == SetCC ? SetCCBit : LeaveCCBit;
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1632,14 +1648,15 @@ void Assembler::umlal(Register dstL,
Register dstH,
Register src1,
Register src2,
- SBit s,
+ SBitMode smode,
Condition cond) {
if (is_thumb_mode()) {
- umlal_thumb(dstL, dstH, src1, src2, s, cond);
+ umlal_thumb(dstL, dstH, src1, src2, smode, cond);
return;
}
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH));
+ SBit s = smode == SetCC ? SetCCBit : LeaveCCBit;
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1649,14 +1666,15 @@ void Assembler::umull(Register dstL,
Register dstH,
Register src1,
Register src2,
- SBit s,
+ SBitMode smode,
Condition cond) {
if (is_thumb_mode()) {
- umull_thumb(dstL, dstH, src1, src2, s, cond);
+ umull_thumb(dstL, dstH, src1, src2, smode, cond);
return;
}
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH));
+ SBit s = smode == SetCC ? SetCCBit : LeaveCCBit;
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1933,7 +1951,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
uint32_t rotate_imm;
uint32_t immed_8;
if (src.must_output_reloc_info(this) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
ldr(ip, MemOperand(pc, 0), cond);
@@ -3287,7 +3305,7 @@ bool Assembler::IsNop(Instr instr, int type) {
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL, NULL);
}
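Read end to end, the patch changes the data-processing API from "pass an S bit" to "state your intent about the flags". A hypothetical caller-side illustration of the new contract (assm and the register aliases are assumed setup; per the issue title, the header presumably also flips the arithmetic default toward flag-setting, which this diff does not show):

// Hypothetical call sites; not part of the patch.
assm.add(r0, r1, Operand(r2), SetCC, al);       // Flags required: ADDS.
assm.add(r0, r1, Operand(r2), LeaveCC, al);     // Flags live across: ADD.
assm.add(r0, r1, Operand(r2), DontCareCC, al);  // Thumb2 may emit a 16-bit
                                                // ADDS; ARM emits plain ADD.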