Index: src/mips64/macro-assembler-mips64.cc |
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc |
index a505f4acb81f524c82362eebe87e918a9bcbf9d6..a832c59a10c75e502019fbfe5f38bfd21be01c31 100644 |
--- a/src/mips64/macro-assembler-mips64.cc |
+++ b/src/mips64/macro-assembler-mips64.cc |
@@ -622,7 +622,7 @@ |
addu(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- addiu(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ addiu(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -638,7 +638,7 @@ |
daddu(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- daddiu(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ daddiu(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -654,8 +654,7 @@ |
subu(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- addiu(rd, rs, static_cast<int32_t>( |
- -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm). |
+ addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -671,9 +670,7 @@ |
dsubu(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- daddiu(rd, rs, |
- static_cast<int32_t>( |
- -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm). |
+    daddiu(rd, rs, -rt.imm64_);  // No dsubiu instr, use daddiu(x, y, -imm). |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -1070,7 +1067,7 @@ |
and_(rd, rs, rt.rm()); |
} else { |
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- andi(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ andi(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -1086,7 +1083,7 @@ |
or_(rd, rs, rt.rm()); |
} else { |
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- ori(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ ori(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -1102,7 +1099,7 @@ |
xor_(rd, rs, rt.rm()); |
} else { |
if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- xori(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ xori(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -1139,7 +1136,7 @@ |
slt(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- slti(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ slti(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -1155,7 +1152,7 @@ |
sltu(rd, rs, rt.rm()); |
} else { |
if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { |
- sltiu(rd, rs, static_cast<int32_t>(rt.imm64_)); |
+ sltiu(rd, rs, rt.imm64_); |
} else { |
// li handles the relocation. |
DCHECK(!rs.is(at)); |
@@ -2334,7 +2331,7 @@ |
if (rt.imm64_ == 0) { |
bgez(rs, offset); |
} else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ slti(scratch, rs, rt.imm64_); |
beq(scratch, zero_reg, offset); |
} else { |
r2 = scratch; |
@@ -2347,7 +2344,7 @@ |
if (rt.imm64_ == 0) { |
bltz(rs, offset); |
} else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ slti(scratch, rs, rt.imm64_); |
bne(scratch, zero_reg, offset); |
} else { |
r2 = scratch; |
@@ -2381,7 +2378,7 @@ |
if (rt.imm64_ == 0) { |
b(offset); |
} else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ sltiu(scratch, rs, rt.imm64_); |
beq(scratch, zero_reg, offset); |
} else { |
r2 = scratch; |
@@ -2395,7 +2392,7 @@ |
// No code needs to be emitted. |
return; |
} else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ sltiu(scratch, rs, rt.imm64_); |
bne(scratch, zero_reg, offset); |
} else { |
r2 = scratch; |
@@ -2601,7 +2598,7 @@ |
offset = shifted_branch_offset(L, false); |
bgez(rs, offset); |
} else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ slti(scratch, rs, rt.imm64_); |
offset = shifted_branch_offset(L, false); |
beq(scratch, zero_reg, offset); |
} else { |
@@ -2618,7 +2615,7 @@ |
offset = shifted_branch_offset(L, false); |
bltz(rs, offset); |
} else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ slti(scratch, rs, rt.imm64_); |
offset = shifted_branch_offset(L, false); |
bne(scratch, zero_reg, offset); |
} else { |
@@ -2662,7 +2659,7 @@ |
offset = shifted_branch_offset(L, false); |
b(offset); |
} else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ sltiu(scratch, rs, rt.imm64_); |
offset = shifted_branch_offset(L, false); |
beq(scratch, zero_reg, offset); |
} else { |
@@ -2679,7 +2676,7 @@ |
// No code needs to be emitted. |
return; |
} else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
+ sltiu(scratch, rs, rt.imm64_); |
offset = shifted_branch_offset(L, false); |
bne(scratch, zero_reg, offset); |
} else { |
@@ -3459,7 +3456,7 @@ |
Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); |
} |
// Load allocation limit into t9. Result already contains allocation top. |
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top))); |
+ ld(t9, MemOperand(topaddr, limit - top)); |
} |
DCHECK(kPointerSize == kDoubleSize); |
@@ -3535,7 +3532,7 @@ |
Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); |
} |
// Load allocation limit into t9. Result already contains allocation top. |
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top))); |
+ ld(t9, MemOperand(topaddr, limit - top)); |
} |
DCHECK(kPointerSize == kDoubleSize); |
@@ -4463,18 +4460,17 @@ |
} else { |
if (dst.is(left)) { |
mov(scratch, left); // Preserve left. |
- daddiu(dst, left, |
- static_cast<int32_t>(right.immediate())); // Left is overwritten. |
+ daddiu(dst, left, right.immediate()); // Left is overwritten. |
xor_(scratch, dst, scratch); // Original left. |
// Load right since xori takes uint16 as immediate. |
- daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate())); |
+ daddiu(t9, zero_reg, right.immediate()); |
xor_(overflow_dst, dst, t9); |
and_(overflow_dst, overflow_dst, scratch); |
} else { |
- daddiu(dst, left, static_cast<int32_t>(right.immediate())); |
+ daddiu(dst, left, right.immediate()); |
xor_(overflow_dst, dst, left); |
// Load right since xori takes uint16 as immediate. |
- daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate())); |
+ daddiu(t9, zero_reg, right.immediate()); |
xor_(scratch, dst, t9); |
and_(overflow_dst, scratch, overflow_dst); |
} |
@@ -4533,18 +4529,17 @@ |
} else { |
if (dst.is(left)) { |
mov(scratch, left); // Preserve left. |
- daddiu(dst, left, |
- static_cast<int32_t>(-right.immediate())); // Left is overwritten. |
+ daddiu(dst, left, -(right.immediate())); // Left is overwritten. |
xor_(overflow_dst, dst, scratch); // scratch is original left. |
// Load right since xori takes uint16 as immediate. |
- daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate())); |
+ daddiu(t9, zero_reg, right.immediate()); |
xor_(scratch, scratch, t9); // scratch is original left. |
and_(overflow_dst, scratch, overflow_dst); |
} else { |
- daddiu(dst, left, static_cast<int32_t>(-right.immediate())); |
+ daddiu(dst, left, -(right.immediate())); |
xor_(overflow_dst, dst, left); |
// Load right since xori takes uint16 as immediate. |
- daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate())); |
+ daddiu(t9, zero_reg, right.immediate()); |
xor_(scratch, left, t9); |
and_(overflow_dst, scratch, overflow_dst); |
} |
@@ -4845,7 +4840,8 @@ |
ld(scratch, |
MemOperand(scratch, |
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); |
- int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize; |
+ size_t offset = expected_kind * kPointerSize + |
+ FixedArrayBase::kHeaderSize; |
ld(at, FieldMemOperand(scratch, offset)); |
Branch(no_map_match, ne, map_in_out, Operand(at)); |